pub use solana_perf::report_target_features;
4use {
5 crate::{
6 admin_rpc_post_init::{AdminRpcRequestMetadataPostInit, KeyUpdaterType, KeyUpdaters},
7 banking_stage::{
8 transaction_scheduler::scheduler_controller::SchedulerConfig, BankingStage,
9 },
10 banking_trace::{self, BankingTracer, TraceError},
11 cluster_info_vote_listener::VoteTracker,
12 completed_data_sets_service::CompletedDataSetsService,
13 consensus::{
14 reconcile_blockstore_roots_with_external_source,
15 tower_storage::{NullTowerStorage, TowerStorage},
16 ExternalRootSource, Tower,
17 },
18 repair::{
19 self,
20 quic_endpoint::{RepairQuicAsyncSenders, RepairQuicSenders, RepairQuicSockets},
21 repair_handler::RepairHandlerType,
22 serve_repair_service::ServeRepairService,
23 },
24 resource_limits::{
25 adjust_nofile_limit, validate_memlock_limit_for_disk_io, ResourceLimitError,
26 },
27 sample_performance_service::SamplePerformanceService,
28 sigverify,
29 snapshot_packager_service::SnapshotPackagerService,
30 stats_reporter_service::StatsReporterService,
31 system_monitor_service::{
32 verify_net_stats_access, SystemMonitorService, SystemMonitorStatsReportConfig,
33 },
34 tpu::{ForwardingClientOption, Tpu, TpuSockets},
35 tvu::{Tvu, TvuConfig, TvuSockets},
36 },
37 agave_snapshots::{
38 snapshot_archive_info::SnapshotArchiveInfoGetter as _, snapshot_config::SnapshotConfig,
39 snapshot_hash::StartingSnapshotHashes, SnapshotInterval,
40 },
41 anyhow::{anyhow, Context, Result},
42 crossbeam_channel::{bounded, unbounded, Receiver},
43 quinn::Endpoint,
44 serde::{Deserialize, Serialize},
45 solana_account::ReadableAccount,
46 solana_accounts_db::{
47 accounts_db::{AccountsDbConfig, ACCOUNTS_DB_CONFIG_FOR_TESTING},
48 accounts_update_notifier_interface::AccountsUpdateNotifier,
49 utils::move_and_async_delete_path_contents,
50 },
51 solana_client::{
52 client_option::ClientOption,
53 connection_cache::{ConnectionCache, Protocol},
54 },
55 solana_clock::Slot,
56 solana_cluster_type::ClusterType,
57 solana_entry::poh::compute_hash_time,
58 solana_epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET,
59 solana_genesis_config::GenesisConfig,
60 solana_genesis_utils::{
61 open_genesis_config, OpenGenesisConfigError, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
62 },
63 solana_geyser_plugin_manager::{
64 geyser_plugin_service::GeyserPluginService, GeyserPluginManagerRequest,
65 },
66 solana_gossip::{
67 cluster_info::{
68 ClusterInfo, DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS,
69 DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS,
70 },
71 contact_info::ContactInfo,
72 crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
73 gossip_service::GossipService,
74 node::{Node, NodeMultihoming},
75 },
76 solana_hard_forks::HardForks,
77 solana_hash::Hash,
78 solana_keypair::Keypair,
79 solana_ledger::{
80 bank_forks_utils,
81 blockstore::{
82 Blockstore, BlockstoreError, PurgeType, MAX_COMPLETED_SLOTS_IN_CHANNEL,
83 MAX_REPLAY_WAKE_UP_SIGNALS,
84 },
85 blockstore_metric_report_service::BlockstoreMetricReportService,
86 blockstore_options::{BlockstoreOptions, BLOCKSTORE_DIRECTORY_ROCKS_LEVEL},
87 blockstore_processor::{self, TransactionStatusSender},
88 entry_notifier_interface::EntryNotifierArc,
89 entry_notifier_service::{EntryNotifierSender, EntryNotifierService},
90 leader_schedule::FixedSchedule,
91 leader_schedule_cache::LeaderScheduleCache,
92 use_snapshot_archives_at_startup::UseSnapshotArchivesAtStartup,
93 },
94 solana_measure::measure::Measure,
95 solana_metrics::{datapoint_info, metrics::metrics_config_sanity_check},
96 solana_poh::{
97 poh_controller::PohController,
98 poh_recorder::PohRecorder,
99 poh_service::{self, PohService},
100 record_channels::record_channels,
101 transaction_recorder::TransactionRecorder,
102 },
103 solana_pubkey::Pubkey,
104 solana_rayon_threadlimit::get_thread_count,
105 solana_rpc::{
106 max_slots::MaxSlots,
107 optimistically_confirmed_bank_tracker::{
108 BankNotificationSenderConfig, OptimisticallyConfirmedBank,
109 OptimisticallyConfirmedBankTracker,
110 },
111 rpc::JsonRpcConfig,
112 rpc_completed_slots_service::RpcCompletedSlotsService,
113 rpc_pubsub_service::{PubSubConfig, PubSubService},
114 rpc_service::{JsonRpcService, JsonRpcServiceConfig},
115 rpc_subscriptions::RpcSubscriptions,
116 transaction_notifier_interface::TransactionNotifierArc,
117 transaction_status_service::TransactionStatusService,
118 },
119 solana_runtime::{
120 accounts_background_service::{
121 AbsRequestHandlers, AccountsBackgroundService, DroppedSlotsReceiver,
122 PendingSnapshotPackages, PrunedBanksRequestHandler, SnapshotRequestHandler,
123 },
124 bank::Bank,
125 bank_forks::BankForks,
126 commitment::BlockCommitmentCache,
127 dependency_tracker::DependencyTracker,
128 prioritization_fee_cache::PrioritizationFeeCache,
129 runtime_config::RuntimeConfig,
130 snapshot_bank_utils,
131 snapshot_controller::SnapshotController,
132 snapshot_utils::{self, clean_orphaned_account_snapshot_dirs},
133 },
134 solana_send_transaction_service::send_transaction_service::Config as SendTransactionServiceConfig,
135 solana_shred_version::compute_shred_version,
136 solana_signer::Signer,
137 solana_streamer::{
138 nonblocking::{simple_qos::SimpleQosConfig, swqos::SwQosConfig},
139 quic::{QuicStreamerConfig, SimpleQosQuicStreamerConfig, SwQosQuicStreamerConfig},
140 socket::SocketAddrSpace,
141 streamer::StakedNodes,
142 },
143 solana_time_utils::timestamp,
144 solana_tpu_client::tpu_client::{
145 DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_USE_QUIC, DEFAULT_VOTE_USE_QUIC,
146 },
147 solana_turbine::{
148 self,
149 broadcast_stage::BroadcastStageType,
150 xdp::{master_ip_if_bonded, XdpConfig, XdpRetransmitter},
151 },
152 solana_unified_scheduler_pool::DefaultSchedulerPool,
153 solana_validator_exit::Exit,
154 solana_vote_program::vote_state::VoteStateV4,
155 solana_wen_restart::wen_restart::{wait_for_wen_restart, WenRestartConfig},
156 std::{
157 borrow::Cow,
158 collections::{HashMap, HashSet},
159 net::{IpAddr, SocketAddr},
160 num::{NonZeroU64, NonZeroUsize},
161 path::{Path, PathBuf},
162 str::FromStr,
163 sync::{
164 atomic::{AtomicBool, AtomicU64, Ordering},
165 Arc, Mutex, RwLock,
166 },
167 thread::{sleep, Builder, JoinHandle},
168 time::{Duration, Instant},
169 },
170 strum::VariantNames,
171 strum_macros::{Display, EnumCount, EnumIter, EnumString, EnumVariantNames, IntoStaticStr},
172 thiserror::Error,
173 tokio::runtime::Runtime as TokioRuntime,
174 tokio_util::sync::CancellationToken,
175};
176
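/// Bound on the completed-data-sets channel feeding `CompletedDataSetsService`
/// (see the `bounded(MAX_COMPLETED_DATA_SETS_IN_CHANNEL)` call in `Validator::new`).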
177const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000;
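/// Percentage of cluster stake that must be observed over gossip before a node
/// configured with `wait_for_supermajority` proceeds past the configured slot.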
178const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 80;
179const WAIT_FOR_WEN_RESTART_SUPERMAJORITY_THRESHOLD_PERCENT: u64 =
183 WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT;
184
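/// How ledger entries are scheduled for execution during block verification
/// (replay): either the legacy blockstore processor or the unified scheduler.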
185#[derive(
186 Clone, EnumCount, EnumIter, EnumString, EnumVariantNames, Default, IntoStaticStr, Display,
187)]
188#[strum(serialize_all = "kebab-case")]
189pub enum BlockVerificationMethod {
190 BlockstoreProcessor,
191 #[default]
192 UnifiedScheduler,
193}
194
195impl BlockVerificationMethod {
196 pub const fn cli_names() -> &'static [&'static str] {
197 Self::VARIANTS
198 }
199
200 pub fn cli_message() -> &'static str {
201 "Switch transaction scheduling method for verifying ledger entries"
202 }
203}
204
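/// Transaction scheduling method used by the banking stage when this node
/// produces blocks.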
205#[derive(
206 Clone,
207 Debug,
208 EnumString,
209 EnumVariantNames,
210 Default,
211 IntoStaticStr,
212 Display,
213 Serialize,
214 Deserialize,
215 PartialEq,
216 Eq,
217)]
218#[strum(serialize_all = "kebab-case")]
219#[serde(rename_all = "kebab-case")]
220pub enum BlockProductionMethod {
221 CentralScheduler,
222 #[default]
223 CentralSchedulerGreedy,
224}
225
226impl BlockProductionMethod {
227 pub const fn cli_names() -> &'static [&'static str] {
228 Self::VARIANTS
229 }
230
231 pub fn cli_message() -> &'static str {
232 "Switch transaction scheduling method for producing ledger entries"
233 }
234}
235
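/// DEPRECATED: has no effect on the banking stage; retained only so existing
/// CLI invocations keep parsing, and slated for removal.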
236#[derive(
237 Clone,
238 Debug,
239 EnumString,
240 EnumVariantNames,
241 Default,
242 IntoStaticStr,
243 Display,
244 Serialize,
245 Deserialize,
246 PartialEq,
247 Eq,
248)]
249#[strum(serialize_all = "kebab-case")]
250#[serde(rename_all = "kebab-case")]
251pub enum TransactionStructure {
252 Sdk,
253 #[default]
254 View,
255}
256
257impl TransactionStructure {
258 pub const fn cli_names() -> &'static [&'static str] {
259 Self::VARIANTS
260 }
261
262 pub fn cli_message() -> &'static str {
263 "DEPRECATED: has no impact on banking stage; will be removed in a future version"
264 }
265}
266
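/// Pacing for the block-production scheduler: either disabled, or a target
/// fill time in milliseconds (see `fill_time`).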
267#[derive(
268 Clone, Debug, EnumVariantNames, IntoStaticStr, Display, Serialize, Deserialize, PartialEq, Eq,
269)]
270#[strum(serialize_all = "kebab-case")]
271#[serde(rename_all = "kebab-case")]
272pub enum SchedulerPacing {
273 Disabled,
274 FillTimeMillis(NonZeroU64),
275}
276
277impl FromStr for SchedulerPacing {
278 type Err = String;
279
280 fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
281 if s.eq_ignore_ascii_case("disabled") {
282 Ok(SchedulerPacing::Disabled)
283 } else {
284 match s.parse::<u64>() {
285 Ok(v) if v > 0 => Ok(SchedulerPacing::FillTimeMillis(
286 NonZeroU64::new(v).ok_or_else(|| "value must be non-zero".to_string())?,
287 )),
288 _ => Err("value must be a positive integer or 'disabled'".to_string()),
289 }
290 }
291 }
292}
293
294impl SchedulerPacing {
295 pub fn fill_time(&self) -> Option<Duration> {
296 match self {
297 SchedulerPacing::Disabled => None,
298 SchedulerPacing::FillTimeMillis(millis) => Some(Duration::from_millis(millis.get())),
299 }
300 }
301}
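// Illustrative parsing behavior (a sketch only, mirroring the `FromStr` impl above):
//
//     let p: SchedulerPacing = "150".parse().unwrap();
//     assert_eq!(p.fill_time(), Some(Duration::from_millis(150)));
//     assert!("disabled".parse::<SchedulerPacing>().unwrap().fill_time().is_none());
//     assert!("0".parse::<SchedulerPacing>().is_err());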
302
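/// Settings for the optional built-in transaction generator: the accounts file
/// it reads and the keypairs it starts from.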
303#[derive(Clone, Debug)]
305pub struct GeneratorConfig {
306 pub accounts_path: String,
307 pub starting_keypairs: Arc<Vec<Keypair>>,
308}
309
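/// Runtime configuration for a [`Validator`], typically assembled from CLI
/// arguments; `default_for_test` provides a baseline suitable for tests.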
310pub struct ValidatorConfig {
311 pub halt_at_slot: Option<Slot>,
312 pub expected_genesis_hash: Option<Hash>,
313 pub expected_bank_hash: Option<Hash>,
314 pub expected_shred_version: Option<u16>,
315 pub voting_disabled: bool,
316 pub account_paths: Vec<PathBuf>,
317 pub account_snapshot_paths: Vec<PathBuf>,
318 pub rpc_config: JsonRpcConfig,
319 pub on_start_geyser_plugin_config_files: Option<Vec<PathBuf>>,
321 pub geyser_plugin_always_enabled: bool,
    pub rpc_addrs: Option<(SocketAddr, SocketAddr)>,
    pub pubsub_config: PubSubConfig,
324 pub snapshot_config: SnapshotConfig,
325 pub max_ledger_shreds: Option<u64>,
326 pub blockstore_options: BlockstoreOptions,
327 pub broadcast_stage_type: BroadcastStageType,
328 pub turbine_disabled: Arc<AtomicBool>,
329 pub fixed_leader_schedule: Option<FixedSchedule>,
330 pub wait_for_supermajority: Option<Slot>,
331 pub new_hard_forks: Option<Vec<Slot>>,
    pub known_validators: Option<HashSet<Pubkey>>,
    pub repair_validators: Option<HashSet<Pubkey>>,
    pub repair_whitelist: Arc<RwLock<HashSet<Pubkey>>>,
    pub gossip_validators: Option<HashSet<Pubkey>>,
    pub max_genesis_archive_unpacked_size: u64,
337 pub run_verification: bool,
340 pub require_tower: bool,
341 pub tower_storage: Arc<dyn TowerStorage>,
342 pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
343 pub contact_debug_interval: u64,
344 pub contact_save_interval: u64,
345 pub send_transaction_service_config: SendTransactionServiceConfig,
346 pub no_poh_speed_test: bool,
347 pub no_os_memory_stats_reporting: bool,
348 pub no_os_network_stats_reporting: bool,
349 pub no_os_cpu_stats_reporting: bool,
350 pub no_os_disk_stats_reporting: bool,
351 pub enforce_ulimit_nofile: bool,
352 pub poh_pinned_cpu_core: usize,
353 pub poh_hashes_per_batch: u64,
354 pub process_ledger_before_services: bool,
355 pub accounts_db_config: AccountsDbConfig,
356 pub warp_slot: Option<Slot>,
357 pub accounts_db_skip_shrink: bool,
358 pub accounts_db_force_initial_clean: bool,
359 pub staked_nodes_overrides: Arc<RwLock<HashMap<Pubkey, u64>>>,
360 pub validator_exit: Arc<RwLock<Exit>>,
361 pub validator_exit_backpressure: HashMap<String, Arc<AtomicBool>>,
362 pub no_wait_for_vote_to_start_leader: bool,
363 pub wait_to_vote_slot: Option<Slot>,
364 pub runtime_config: RuntimeConfig,
365 pub banking_trace_dir_byte_limit: banking_trace::DirByteLimit,
366 pub block_verification_method: BlockVerificationMethod,
367 pub block_production_method: BlockProductionMethod,
368 pub block_production_num_workers: NonZeroUsize,
369 pub block_production_scheduler_config: SchedulerConfig,
370 pub enable_block_production_forwarding: bool,
371 pub generator_config: Option<GeneratorConfig>,
372 pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup,
373 pub wen_restart_proto_path: Option<PathBuf>,
374 pub wen_restart_coordinator: Option<Pubkey>,
375 pub unified_scheduler_handler_threads: Option<usize>,
376 pub ip_echo_server_threads: NonZeroUsize,
377 pub rayon_global_threads: NonZeroUsize,
378 pub replay_forks_threads: NonZeroUsize,
379 pub replay_transactions_threads: NonZeroUsize,
380 pub tvu_shred_sigverify_threads: NonZeroUsize,
381 pub delay_leader_block_for_pending_fork: bool,
382 pub use_tpu_client_next: bool,
383 pub retransmit_xdp: Option<XdpConfig>,
384 pub repair_handler_type: RepairHandlerType,
385}
386
387impl ValidatorConfig {
388 pub fn default_for_test() -> Self {
389 let max_thread_count =
390 NonZeroUsize::new(num_cpus::get()).expect("thread count is non-zero");
391
392 Self {
393 halt_at_slot: None,
394 expected_genesis_hash: None,
395 expected_bank_hash: None,
396 expected_shred_version: None,
397 voting_disabled: false,
398 max_ledger_shreds: None,
399 blockstore_options: BlockstoreOptions::default_for_tests(),
400 account_paths: Vec::new(),
401 account_snapshot_paths: Vec::new(),
402 rpc_config: JsonRpcConfig::default_for_test(),
403 on_start_geyser_plugin_config_files: None,
404 geyser_plugin_always_enabled: false,
405 rpc_addrs: None,
406 pubsub_config: PubSubConfig::default(),
407 snapshot_config: SnapshotConfig::new_load_only(),
408 broadcast_stage_type: BroadcastStageType::Standard,
409 turbine_disabled: Arc::<AtomicBool>::default(),
410 fixed_leader_schedule: None,
411 wait_for_supermajority: None,
412 new_hard_forks: None,
413 known_validators: None,
414 repair_validators: None,
415 repair_whitelist: Arc::new(RwLock::new(HashSet::default())),
416 gossip_validators: None,
417 max_genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
418 run_verification: true,
419 require_tower: false,
420 tower_storage: Arc::new(NullTowerStorage::default()),
421 debug_keys: None,
422 contact_debug_interval: DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS,
423 contact_save_interval: DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS,
424 send_transaction_service_config: SendTransactionServiceConfig::default(),
425 no_poh_speed_test: true,
426 no_os_memory_stats_reporting: true,
427 no_os_network_stats_reporting: true,
428 no_os_cpu_stats_reporting: true,
429 no_os_disk_stats_reporting: true,
430 enforce_ulimit_nofile: false,
432 poh_pinned_cpu_core: poh_service::DEFAULT_PINNED_CPU_CORE,
433 poh_hashes_per_batch: poh_service::DEFAULT_HASHES_PER_BATCH,
434 process_ledger_before_services: false,
435 warp_slot: None,
436 accounts_db_skip_shrink: false,
437 accounts_db_force_initial_clean: false,
438 staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())),
439 validator_exit: Arc::new(RwLock::new(Exit::default())),
440 validator_exit_backpressure: HashMap::default(),
441 no_wait_for_vote_to_start_leader: true,
442 accounts_db_config: ACCOUNTS_DB_CONFIG_FOR_TESTING,
443 wait_to_vote_slot: None,
444 runtime_config: RuntimeConfig::default(),
445 banking_trace_dir_byte_limit: 0,
446 block_verification_method: BlockVerificationMethod::default(),
447 block_production_method: BlockProductionMethod::default(),
448 block_production_num_workers: BankingStage::default_num_workers(),
449 block_production_scheduler_config: SchedulerConfig::default(),
450 enable_block_production_forwarding: true,
452 generator_config: None,
453 use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(),
454 wen_restart_proto_path: None,
455 wen_restart_coordinator: None,
456 unified_scheduler_handler_threads: None,
457 ip_echo_server_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
458 rayon_global_threads: max_thread_count,
459 replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"),
460 replay_transactions_threads: max_thread_count,
461 tvu_shred_sigverify_threads: NonZeroUsize::new(get_thread_count())
462 .expect("thread count is non-zero"),
463 delay_leader_block_for_pending_fork: false,
464 use_tpu_client_next: true,
465 retransmit_xdp: None,
466 repair_handler_type: RepairHandlerType::default(),
467 }
468 }
469
470 pub fn enable_default_rpc_block_subscribe(&mut self) {
471 let pubsub_config = PubSubConfig {
472 enable_block_subscription: true,
473 ..PubSubConfig::default()
474 };
475 let rpc_config = JsonRpcConfig {
476 enable_rpc_transaction_history: true,
477 ..JsonRpcConfig::default_for_test()
478 };
479
480 self.pubsub_config = pubsub_config;
481 self.rpc_config = rpc_config;
482 }
483}
484
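/// Coarse phases a validator reports while starting up; the value is updated
/// as startup progresses and ends at `Running`, so the process that launched
/// the validator can observe how far along boot is.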
485#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Default)]
489pub enum ValidatorStartProgress {
490 #[default]
    Initializing,
    SearchingForRpcService,
493 DownloadingSnapshot {
494 slot: Slot,
495 rpc_addr: SocketAddr,
496 },
497 CleaningBlockStore,
498 CleaningAccounts,
499 LoadingLedger,
500 ProcessingLedger {
501 slot: Slot,
502 max_slot: Slot,
503 },
504 StartingServices,
    Halted,
    WaitingForSupermajority {
507 slot: Slot,
508 gossip_stake_percent: u64,
509 },
510
511 Running,
514}
515
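/// Optional background thread that runs `Blockstore::scan_and_fix_roots` when
/// RPC transaction history and `rpc_scan_and_fix_roots` are both enabled.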
516struct BlockstoreRootScan {
517 thread: Option<JoinHandle<Result<usize, BlockstoreError>>>,
518}
519
520impl BlockstoreRootScan {
521 fn new(config: &ValidatorConfig, blockstore: Arc<Blockstore>, exit: Arc<AtomicBool>) -> Self {
522 let thread = if config.rpc_addrs.is_some()
523 && config.rpc_config.enable_rpc_transaction_history
524 && config.rpc_config.rpc_scan_and_fix_roots
525 {
526 Some(
527 Builder::new()
528 .name("solBStoreRtScan".to_string())
529 .spawn(move || blockstore.scan_and_fix_roots(None, None, &exit))
530 .unwrap(),
531 )
532 } else {
533 None
534 };
535 Self { thread }
536 }
537
538 fn join(self) {
539 if let Some(blockstore_root_scan) = self.thread {
540 if let Err(err) = blockstore_root_scan.join() {
541 warn!("blockstore_root_scan failed to join {err:?}");
542 }
543 }
544 }
545}
546
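/// Transaction-status plumbing created while loading the blockstore: the
/// sender/service pair (when enabled) and the highest slot with complete
/// transaction status.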
547#[derive(Default)]
548struct TransactionHistoryServices {
549 transaction_status_sender: Option<TransactionStatusSender>,
550 transaction_status_service: Option<TransactionStatusService>,
551 max_complete_transaction_status_slot: Arc<AtomicU64>,
552}
553
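/// TPU networking options: whether transactions and votes use QUIC or UDP, the
/// client connection pool size, and the QUIC streamer configurations for the
/// TPU, TPU-forward, and TPU-vote sockets.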
554pub struct ValidatorTpuConfig {
556 pub use_quic: bool,
558 pub vote_use_quic: bool,
560 pub tpu_connection_pool_size: usize,
562 pub tpu_enable_udp: bool,
564 pub tpu_quic_server_config: SwQosQuicStreamerConfig,
566 pub tpu_fwd_quic_server_config: SwQosQuicStreamerConfig,
568 pub vote_quic_server_config: SimpleQosQuicStreamerConfig,
570}
571
572impl ValidatorTpuConfig {
573 pub fn new_for_tests(tpu_enable_udp: bool) -> Self {
576 let tpu_quic_server_config = SwQosQuicStreamerConfig {
577 quic_streamer_config: QuicStreamerConfig {
578 max_connections_per_ipaddr_per_min: 32,
                accumulator_channel_size: 100_000,
                ..Default::default()
581 },
582 qos_config: SwQosConfig::default(),
583 };
584
585 let tpu_fwd_quic_server_config = SwQosQuicStreamerConfig {
586 quic_streamer_config: QuicStreamerConfig {
587 max_connections_per_ipaddr_per_min: 32,
588 max_unstaked_connections: 0,
                accumulator_channel_size: 100_000,
                ..Default::default()
591 },
592 qos_config: SwQosConfig::default(),
593 };
594
595 let vote_quic_server_config = SimpleQosQuicStreamerConfig {
597 quic_streamer_config: QuicStreamerConfig {
598 max_connections_per_ipaddr_per_min: 32,
599 max_unstaked_connections: 0,
                accumulator_channel_size: 100_000,
                ..Default::default()
602 },
603 qos_config: SimpleQosConfig::default(),
604 };
605
606 ValidatorTpuConfig {
607 use_quic: DEFAULT_TPU_USE_QUIC,
608 vote_use_quic: DEFAULT_VOTE_USE_QUIC,
609 tpu_connection_pool_size: DEFAULT_TPU_CONNECTION_POOL_SIZE,
610 tpu_enable_udp,
611 tpu_quic_server_config,
612 tpu_fwd_quic_server_config,
613 vote_quic_server_config,
614 }
615 }
616}
617
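/// A running validator: holds every long-lived service (gossip, TVU, TPU, PoH,
/// RPC, snapshotting, QUIC endpoints, ...) plus the shared state
/// (`cluster_info`, `bank_forks`, `blockstore`) needed to operate them and
/// eventually shut them down and join them.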
618pub struct Validator {
619 validator_exit: Arc<RwLock<Exit>>,
620 json_rpc_service: Option<JsonRpcService>,
621 pubsub_service: Option<PubSubService>,
622 rpc_completed_slots_service: Option<JoinHandle<()>>,
623 optimistically_confirmed_bank_tracker: Option<OptimisticallyConfirmedBankTracker>,
624 transaction_status_service: Option<TransactionStatusService>,
625 entry_notifier_service: Option<EntryNotifierService>,
626 system_monitor_service: Option<SystemMonitorService>,
627 sample_performance_service: Option<SamplePerformanceService>,
628 stats_reporter_service: StatsReporterService,
629 gossip_service: GossipService,
630 serve_repair_service: ServeRepairService,
631 completed_data_sets_service: Option<CompletedDataSetsService>,
632 snapshot_packager_service: Option<SnapshotPackagerService>,
633 poh_recorder: Arc<RwLock<PohRecorder>>,
634 poh_service: PohService,
635 tpu: Tpu,
636 tvu: Tvu,
637 ip_echo_server: Option<solana_net_utils::IpEchoServer>,
638 pub cluster_info: Arc<ClusterInfo>,
639 pub bank_forks: Arc<RwLock<BankForks>>,
640 pub blockstore: Arc<Blockstore>,
641 geyser_plugin_service: Option<GeyserPluginService>,
642 blockstore_metric_report_service: BlockstoreMetricReportService,
643 accounts_background_service: AccountsBackgroundService,
644 turbine_quic_endpoint: Option<Endpoint>,
645 turbine_quic_endpoint_runtime: Option<TokioRuntime>,
646 turbine_quic_endpoint_join_handle: Option<solana_turbine::quic_endpoint::AsyncTryJoinHandle>,
647 repair_quic_endpoints: Option<[Endpoint; 3]>,
648 repair_quic_endpoints_runtime: Option<TokioRuntime>,
649 repair_quic_endpoints_join_handle: Option<repair::quic_endpoint::AsyncTryJoinHandle>,
650 xdp_retransmitter: Option<XdpRetransmitter>,
651 _tpu_client_next_runtime: Option<TokioRuntime>,
655}
656
657impl Validator {
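    /// Builds and starts all validator services for `node` using
    /// `identity_keypair` and `config`: loads the blockstore and bank forks
    /// from `ledger_path`, optionally waits for supermajority (or runs
    /// wen_restart), then spins up gossip, RPC, TVU, TPU, and PoH before
    /// reporting `ValidatorStartProgress::Running`.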
658 #[allow(clippy::too_many_arguments)]
659 pub fn new(
660 mut node: Node,
661 identity_keypair: Arc<Keypair>,
662 ledger_path: &Path,
663 vote_account: &Pubkey,
664 authorized_voter_keypairs: Arc<RwLock<Vec<Arc<Keypair>>>>,
665 cluster_entrypoints: Vec<ContactInfo>,
666 config: &ValidatorConfig,
667 should_check_duplicate_instance: bool,
668 rpc_to_plugin_manager_receiver: Option<Receiver<GeyserPluginManagerRequest>>,
669 start_progress: Arc<RwLock<ValidatorStartProgress>>,
670 socket_addr_space: SocketAddrSpace,
671 tpu_config: ValidatorTpuConfig,
672 admin_rpc_service_post_init: Arc<RwLock<Option<AdminRpcRequestMetadataPostInit>>>,
673 ) -> Result<Self> {
674 #[cfg(debug_assertions)]
675 const DEBUG_ASSERTION_STATUS: &str = "enabled";
676 #[cfg(not(debug_assertions))]
677 const DEBUG_ASSERTION_STATUS: &str = "disabled";
678 info!("debug-assertion status: {DEBUG_ASSERTION_STATUS}");
679
680 let ValidatorTpuConfig {
681 use_quic,
682 vote_use_quic,
683 tpu_connection_pool_size,
684 tpu_enable_udp,
685 tpu_quic_server_config,
686 tpu_fwd_quic_server_config,
687 vote_quic_server_config,
688 } = tpu_config;
689
690 let start_time = Instant::now();
691
692 adjust_nofile_limit(config.enforce_ulimit_nofile)?;
693
694 if rayon::ThreadPoolBuilder::new()
698 .thread_name(|i| format!("solRayonGlob{i:02}"))
699 .num_threads(config.rayon_global_threads.get())
700 .build_global()
701 .is_err()
702 {
703 warn!("Rayon global thread pool already initialized");
704 }
705
706 let id = identity_keypair.pubkey();
707 assert_eq!(&id, node.info.pubkey());
708
709 info!("identity pubkey: {id}");
710 info!("vote account pubkey: {vote_account}");
711
712 if !config.no_os_network_stats_reporting {
713 verify_net_stats_access().map_err(|e| {
714 ValidatorError::Other(format!("Failed to access network stats: {e:?}"))
715 })?;
716 }
717
718 let mut bank_notification_senders = Vec::new();
719
720 let exit = Arc::new(AtomicBool::new(false));
721
722 let geyser_plugin_config_files = config
723 .on_start_geyser_plugin_config_files
724 .as_ref()
725 .map(Cow::Borrowed)
726 .or_else(|| {
727 config
728 .geyser_plugin_always_enabled
729 .then_some(Cow::Owned(vec![]))
730 });
731 let geyser_plugin_service =
732 if let Some(geyser_plugin_config_files) = geyser_plugin_config_files {
733 let (confirmed_bank_sender, confirmed_bank_receiver) = unbounded();
734 bank_notification_senders.push(confirmed_bank_sender);
735 let rpc_to_plugin_manager_receiver_and_exit =
736 rpc_to_plugin_manager_receiver.map(|receiver| (receiver, exit.clone()));
737 Some(
738 GeyserPluginService::new_with_receiver(
739 confirmed_bank_receiver,
740 config.geyser_plugin_always_enabled,
741 geyser_plugin_config_files.as_ref(),
742 rpc_to_plugin_manager_receiver_and_exit,
743 )
744 .map_err(|err| {
745 ValidatorError::Other(format!("Failed to load the Geyser plugin: {err:?}"))
746 })?,
747 )
748 } else {
749 None
750 };
751
752 if config.voting_disabled {
753 warn!("voting disabled");
754 authorized_voter_keypairs.write().unwrap().clear();
755 } else {
756 for authorized_voter_keypair in authorized_voter_keypairs.read().unwrap().iter() {
757 warn!("authorized voter: {}", authorized_voter_keypair.pubkey());
758 }
759 }
760
761 for cluster_entrypoint in &cluster_entrypoints {
762 info!("entrypoint: {cluster_entrypoint:?}");
763 }
764
765 if solana_perf::perf_libs::api().is_some() {
766 info!("Initializing sigverify, this could take a while...");
767 } else {
768 info!("Initializing sigverify...");
769 }
770 sigverify::init();
771 info!("Initializing sigverify done.");
772
773 validate_memlock_limit_for_disk_io(config.accounts_db_config.memlock_budget_size)?;
774
775 if !ledger_path.is_dir() {
776 return Err(anyhow!(
777 "ledger directory does not exist or is not accessible: {ledger_path:?}"
778 ));
779 }
780 let genesis_config = load_genesis(config, ledger_path)?;
781 metrics_config_sanity_check(genesis_config.cluster_type)?;
782
783 info!("Cleaning accounts paths..");
784 *start_progress.write().unwrap() = ValidatorStartProgress::CleaningAccounts;
785 let mut timer = Measure::start("clean_accounts_paths");
786 cleanup_accounts_paths(config);
787 timer.stop();
788 info!("Cleaning accounts paths done. {timer}");
789
790 snapshot_utils::purge_incomplete_bank_snapshots(&config.snapshot_config.bank_snapshots_dir);
791 snapshot_utils::purge_old_bank_snapshots_at_startup(
792 &config.snapshot_config.bank_snapshots_dir,
793 );
794
795 info!("Cleaning orphaned account snapshot directories..");
796 let mut timer = Measure::start("clean_orphaned_account_snapshot_dirs");
797 clean_orphaned_account_snapshot_dirs(
798 &config.snapshot_config.bank_snapshots_dir,
799 &config.account_snapshot_paths,
800 )
801 .context("failed to clean orphaned account snapshot directories")?;
802 timer.stop();
803 info!("Cleaning orphaned account snapshot directories done. {timer}");
804
805 let cancel = CancellationToken::new();
807 {
808 let exit = exit.clone();
809 config
810 .validator_exit
811 .write()
812 .unwrap()
813 .register_exit(Box::new(move || exit.store(true, Ordering::Relaxed)));
814 let cancel = cancel.clone();
815 config
816 .validator_exit
817 .write()
818 .unwrap()
819 .register_exit(Box::new(move || cancel.cancel()));
820 }
821
822 let (
823 accounts_update_notifier,
824 transaction_notifier,
825 entry_notifier,
826 block_metadata_notifier,
827 slot_status_notifier,
828 ) = if let Some(service) = &geyser_plugin_service {
829 (
830 service.get_accounts_update_notifier(),
831 service.get_transaction_notifier(),
832 service.get_entry_notifier(),
833 service.get_block_metadata_notifier(),
834 service.get_slot_status_notifier(),
835 )
836 } else {
837 (None, None, None, None, None)
838 };
839
840 info!(
841 "Geyser plugin: accounts_update_notifier: {}, transaction_notifier: {}, \
842 entry_notifier: {}",
843 accounts_update_notifier.is_some(),
844 transaction_notifier.is_some(),
845 entry_notifier.is_some()
846 );
847
848 let system_monitor_service = Some(SystemMonitorService::new(
849 exit.clone(),
850 SystemMonitorStatsReportConfig {
851 report_os_memory_stats: !config.no_os_memory_stats_reporting,
852 report_os_network_stats: !config.no_os_network_stats_reporting,
853 report_os_cpu_stats: !config.no_os_cpu_stats_reporting,
854 report_os_disk_stats: !config.no_os_disk_stats_reporting,
855 },
856 ));
857
858 let dependency_tracker = Arc::new(DependencyTracker::default());
859
860 let (
861 bank_forks,
862 blockstore,
863 original_blockstore_root,
864 ledger_signal_receiver,
865 leader_schedule_cache,
866 starting_snapshot_hashes,
867 TransactionHistoryServices {
868 transaction_status_sender,
869 transaction_status_service,
870 max_complete_transaction_status_slot,
871 },
872 blockstore_process_options,
873 blockstore_root_scan,
874 pruned_banks_receiver,
875 entry_notifier_service,
876 ) = load_blockstore(
877 config,
878 ledger_path,
879 &genesis_config,
880 exit.clone(),
881 &start_progress,
882 accounts_update_notifier,
883 transaction_notifier,
884 entry_notifier,
885 config
886 .rpc_addrs
887 .is_some()
888 .then(|| dependency_tracker.clone()),
889 )
890 .map_err(ValidatorError::Other)?;
891
892 if !config.no_poh_speed_test {
893 check_poh_speed(&bank_forks.read().unwrap().root_bank(), None)?;
894 }
895
896 let (root_slot, hard_forks) = {
897 let root_bank = bank_forks.read().unwrap().root_bank();
898 (root_bank.slot(), root_bank.hard_forks())
899 };
900 let shred_version = compute_shred_version(&genesis_config.hash(), Some(&hard_forks));
901 info!("shred version: {shred_version}, hard forks: {hard_forks:?}");
902
903 if let Some(expected_shred_version) = config.expected_shred_version {
904 if expected_shred_version != shred_version {
905 return Err(ValidatorError::ShredVersionMismatch {
906 actual: shred_version,
907 expected: expected_shred_version,
908 }
909 .into());
910 }
911 }
912
913 if let Some(start_slot) = should_cleanup_blockstore_incorrect_shred_versions(
914 config,
915 &blockstore,
916 root_slot,
917 &hard_forks,
918 )? {
919 *start_progress.write().unwrap() = ValidatorStartProgress::CleaningBlockStore;
920 cleanup_blockstore_incorrect_shred_versions(
921 &blockstore,
922 config,
923 start_slot,
924 shred_version,
925 )?;
926 } else {
927 info!("Skipping the blockstore check for shreds with incorrect version");
928 }
929
930 node.info.set_shred_version(shred_version);
931 node.info.set_wallclock(timestamp());
932 Self::print_node_info(&node);
933
934 let mut cluster_info = ClusterInfo::new(
935 node.info.clone(),
936 identity_keypair.clone(),
937 socket_addr_space,
938 );
939 cluster_info.set_contact_debug_interval(config.contact_debug_interval);
940 cluster_info.set_entrypoints(cluster_entrypoints);
941 cluster_info.restore_contact_info(ledger_path, config.contact_save_interval);
942 cluster_info.set_bind_ip_addrs(node.bind_ip_addrs.clone());
943 let cluster_info = Arc::new(cluster_info);
944 let node_multihoming = Arc::new(NodeMultihoming::from(&node));
945
946 assert!(is_snapshot_config_valid(&config.snapshot_config));
947
948 let (snapshot_request_sender, snapshot_request_receiver) = unbounded();
949 let snapshot_controller = Arc::new(SnapshotController::new(
950 snapshot_request_sender.clone(),
951 config.snapshot_config.clone(),
952 bank_forks.read().unwrap().root(),
953 ));
954
955 let pending_snapshot_packages = Arc::new(Mutex::new(PendingSnapshotPackages::default()));
956 let snapshot_packager_service = if snapshot_controller
957 .snapshot_config()
958 .should_generate_snapshots()
959 {
960 let exit_backpressure = config
961 .validator_exit_backpressure
962 .get(SnapshotPackagerService::NAME)
963 .cloned();
964 let enable_gossip_push = true;
965 let snapshot_packager_service = SnapshotPackagerService::new(
966 pending_snapshot_packages.clone(),
967 starting_snapshot_hashes,
968 exit.clone(),
969 exit_backpressure,
970 cluster_info.clone(),
971 snapshot_controller.clone(),
972 enable_gossip_push,
973 );
974 Some(snapshot_packager_service)
975 } else {
976 None
977 };
978
979 let snapshot_request_handler = SnapshotRequestHandler {
980 snapshot_controller: snapshot_controller.clone(),
981 snapshot_request_receiver,
982 pending_snapshot_packages,
983 };
984 let pruned_banks_request_handler = PrunedBanksRequestHandler {
985 pruned_banks_receiver,
986 };
987 let accounts_background_service = AccountsBackgroundService::new(
988 bank_forks.clone(),
989 exit.clone(),
990 AbsRequestHandlers {
991 snapshot_request_handler,
992 pruned_banks_request_handler,
993 },
994 );
995 info!(
996 "Using: block-verification-method: {}, block-production-method: {}",
997 config.block_verification_method, config.block_production_method,
998 );
999
1000 let (replay_vote_sender, replay_vote_receiver) = unbounded();
1001
1002 let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::default());
1005
1006 let leader_schedule_cache = Arc::new(leader_schedule_cache);
1007 let (poh_recorder, entry_receiver) = {
1008 let bank = &bank_forks.read().unwrap().working_bank();
1009 PohRecorder::new_with_clear_signal(
1010 bank.tick_height(),
1011 bank.last_blockhash(),
1012 bank.clone(),
1013 None,
1014 bank.ticks_per_slot(),
1015 config.delay_leader_block_for_pending_fork,
1016 blockstore.clone(),
1017 blockstore.get_new_shred_signal(0),
1018 &leader_schedule_cache,
1019 &genesis_config.poh_config,
1020 exit.clone(),
1021 )
1022 };
1023 let (record_sender, record_receiver) = record_channels(transaction_status_sender.is_some());
1024 let transaction_recorder = TransactionRecorder::new(record_sender);
1025 let poh_recorder = Arc::new(RwLock::new(poh_recorder));
1026 let (poh_controller, poh_service_message_receiver) = PohController::new();
1027
1028 let (banking_tracer, tracer_thread) =
1029 BankingTracer::new((config.banking_trace_dir_byte_limit > 0).then_some((
1030 &blockstore.banking_trace_path(),
1031 exit.clone(),
1032 config.banking_trace_dir_byte_limit,
1033 )))?;
1034 if banking_tracer.is_enabled() {
1035 info!(
1036 "Enabled banking trace (dir_byte_limit: {})",
1037 config.banking_trace_dir_byte_limit
1038 );
1039 } else {
1040 info!("Disabled banking trace");
1041 }
1042 let banking_tracer_channels = banking_tracer.create_channels(false);
1043
1044 match &config.block_verification_method {
1045 BlockVerificationMethod::BlockstoreProcessor => {
1046 info!("no scheduler pool is installed for block verification...");
1047 if let Some(count) = config.unified_scheduler_handler_threads {
1048 warn!(
1049 "--unified-scheduler-handler-threads={count} is ignored because unified \
1050 scheduler isn't enabled"
1051 );
1052 }
1053 }
1054 BlockVerificationMethod::UnifiedScheduler => {
1055 let scheduler_pool = DefaultSchedulerPool::new_dyn(
1056 config.unified_scheduler_handler_threads,
1057 config.runtime_config.log_messages_bytes_limit,
1058 transaction_status_sender.clone(),
1059 Some(replay_vote_sender.clone()),
1060 prioritization_fee_cache.clone(),
1061 );
1062 bank_forks
1063 .write()
1064 .unwrap()
1065 .install_scheduler_pool(scheduler_pool);
1066 }
1067 }
1068
1069 let entry_notification_sender = entry_notifier_service
1070 .as_ref()
1071 .map(|service| service.sender());
1072 let mut process_blockstore = ProcessBlockStore::new(
1073 &id,
1074 vote_account,
1075 &start_progress,
1076 &blockstore,
1077 original_blockstore_root,
1078 &bank_forks,
1079 &leader_schedule_cache,
1080 &blockstore_process_options,
1081 transaction_status_sender.as_ref(),
1082 entry_notification_sender,
1083 blockstore_root_scan,
1084 &snapshot_controller,
1085 config,
1086 );
1087
1088 maybe_warp_slot(
1089 config,
1090 &mut process_blockstore,
1091 ledger_path,
1092 &bank_forks,
1093 &leader_schedule_cache,
1094 &snapshot_controller,
1095 )
1096 .map_err(ValidatorError::Other)?;
1097
1098 if config.process_ledger_before_services {
1099 process_blockstore
1100 .process()
1101 .map_err(ValidatorError::Other)?;
1102 }
1103 *start_progress.write().unwrap() = ValidatorStartProgress::StartingServices;
1104
1105 let sample_performance_service =
1106 if config.rpc_addrs.is_some() && config.rpc_config.enable_rpc_transaction_history {
1107 Some(SamplePerformanceService::new(
1108 &bank_forks,
1109 blockstore.clone(),
1110 exit.clone(),
1111 ))
1112 } else {
1113 None
1114 };
1115
1116 let mut block_commitment_cache = BlockCommitmentCache::default();
1117 let bank_forks_guard = bank_forks.read().unwrap();
1118 block_commitment_cache.initialize_slots(
1119 bank_forks_guard.working_bank().slot(),
1120 bank_forks_guard.root(),
1121 );
1122 drop(bank_forks_guard);
1123 let block_commitment_cache = Arc::new(RwLock::new(block_commitment_cache));
1124
1125 let optimistically_confirmed_bank =
1126 OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
1127
1128 let max_slots = Arc::new(MaxSlots::default());
1129
1130 let staked_nodes = Arc::new(RwLock::new(StakedNodes::default()));
1131
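        // Client plumbing for sending/forwarding transactions: when
        // `use_tpu_client_next` is set, no ConnectionCache is built here (the
        // tpu-client-next backend is wired up further down); otherwise a QUIC
        // or UDP ConnectionCache is created depending on `use_quic`.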
1132 let mut tpu_transactions_forwards_client_sockets =
1133 Some(node.sockets.tpu_transaction_forwarding_clients);
1134 let connection_cache = match (config.use_tpu_client_next, use_quic) {
1135 (false, true) => Some(Arc::new(ConnectionCache::new_with_client_options(
1136 "connection_cache_tpu_quic",
1137 tpu_connection_pool_size,
1138 Some({
1139 let socketbox: Box<[_; 1]> = tpu_transactions_forwards_client_sockets
1142 .take()
1143 .unwrap()
1144 .try_into()
1145 .expect("Multihoming support for connection cache is not available");
1146 let [sock] = *socketbox;
1147 sock
1148 }),
1149 Some((
1150 &identity_keypair,
1151 node.info
1152 .tpu(Protocol::UDP)
1153 .ok_or_else(|| {
1154 ValidatorError::Other(String::from("Invalid UDP address for TPU"))
1155 })?
1156 .ip(),
1157 )),
1158 Some((&staked_nodes, &identity_keypair.pubkey())),
1159 ))),
1160 (false, false) => Some(Arc::new(ConnectionCache::with_udp(
1161 "connection_cache_tpu_udp",
1162 tpu_connection_pool_size,
1163 ))),
1164 (true, _) => None,
1165 };
1166
1167 let vote_connection_cache = if vote_use_quic {
1168 let vote_connection_cache = ConnectionCache::new_with_client_options(
1169 "connection_cache_vote_quic",
1170 tpu_connection_pool_size,
1171 Some(node.sockets.quic_vote_client),
1172 Some((
1173 &identity_keypair,
1174 node.info
1175 .tpu_vote(Protocol::QUIC)
1176 .ok_or_else(|| {
1177 ValidatorError::Other(String::from("Invalid QUIC address for TPU Vote"))
1178 })?
1179 .ip(),
1180 )),
1181 Some((&staked_nodes, &identity_keypair.pubkey())),
1182 );
1183 Arc::new(vote_connection_cache)
1184 } else {
1185 Arc::new(ConnectionCache::with_udp(
1186 "connection_cache_vote_udp",
1187 tpu_connection_pool_size,
1188 ))
1189 };
1190
1191 let current_runtime_handle = tokio::runtime::Handle::try_current();
1197 let tpu_client_next_runtime =
1198 (current_runtime_handle.is_err() && config.use_tpu_client_next).then(|| {
1199 tokio::runtime::Builder::new_multi_thread()
1200 .enable_all()
1201 .worker_threads(2)
1202 .thread_name("solTpuClientRt")
1203 .build()
1204 .unwrap()
1205 });
1206
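        // The JSON-RPC, pubsub, and related services below are only started
        // when `rpc_addrs` is configured.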
1207 let rpc_override_health_check =
1208 Arc::new(AtomicBool::new(config.rpc_config.disable_health_check));
1209 let (
1210 json_rpc_service,
1211 rpc_subscriptions,
1212 pubsub_service,
1213 completed_data_sets_sender,
1214 completed_data_sets_service,
1215 rpc_completed_slots_service,
1216 optimistically_confirmed_bank_tracker,
1217 bank_notification_sender,
1218 ) = if let Some((rpc_addr, rpc_pubsub_addr)) = config.rpc_addrs {
1219 assert_eq!(
1220 node.info.rpc().map(|addr| socket_addr_space.check(&addr)),
1221 node.info
1222 .rpc_pubsub()
1223 .map(|addr| socket_addr_space.check(&addr))
1224 );
1225 let (bank_notification_sender, bank_notification_receiver) = unbounded();
1226 let confirmed_bank_subscribers = if !bank_notification_senders.is_empty() {
1227 Some(Arc::new(RwLock::new(bank_notification_senders)))
1228 } else {
1229 None
1230 };
1231
1232 let client_option = if config.use_tpu_client_next {
1233 let runtime_handle = tpu_client_next_runtime
1234 .as_ref()
1235 .map(TokioRuntime::handle)
1236 .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap());
1237 ClientOption::TpuClientNext(
1238 Arc::as_ref(&identity_keypair),
1239 node.sockets.rpc_sts_client,
1240 runtime_handle.clone(),
1241 cancel.clone(),
1242 )
1243 } else {
1244 let Some(connection_cache) = &connection_cache else {
1245 panic!("ConnectionCache should exist by construction.");
1246 };
1247 ClientOption::ConnectionCache(connection_cache.clone())
1248 };
1249 let rpc_svc_config = JsonRpcServiceConfig {
1250 rpc_addr,
1251 rpc_config: config.rpc_config.clone(),
1252 snapshot_config: Some(snapshot_controller.snapshot_config().clone()),
1253 bank_forks: bank_forks.clone(),
1254 block_commitment_cache: block_commitment_cache.clone(),
1255 blockstore: blockstore.clone(),
1256 cluster_info: cluster_info.clone(),
1257 poh_recorder: Some(poh_recorder.clone()),
1258 genesis_hash: genesis_config.hash(),
1259 ledger_path: ledger_path.to_path_buf(),
1260 validator_exit: config.validator_exit.clone(),
1261 exit: exit.clone(),
1262 override_health_check: rpc_override_health_check.clone(),
1263 optimistically_confirmed_bank: optimistically_confirmed_bank.clone(),
1264 send_transaction_service_config: config.send_transaction_service_config.clone(),
1265 max_slots: max_slots.clone(),
1266 leader_schedule_cache: leader_schedule_cache.clone(),
1267 max_complete_transaction_status_slot: max_complete_transaction_status_slot.clone(),
1268 prioritization_fee_cache: prioritization_fee_cache.clone(),
1269 client_option,
1270 };
1271 let json_rpc_service =
1272 JsonRpcService::new_with_config(rpc_svc_config).map_err(ValidatorError::Other)?;
1273 let rpc_subscriptions = Arc::new(RpcSubscriptions::new_with_config(
1274 exit.clone(),
1275 max_complete_transaction_status_slot,
1276 blockstore.clone(),
1277 bank_forks.clone(),
1278 block_commitment_cache.clone(),
1279 optimistically_confirmed_bank.clone(),
1280 &config.pubsub_config,
1281 None,
1282 ));
1283 let pubsub_service = if !config.rpc_config.full_api {
1284 None
1285 } else {
1286 let (trigger, pubsub_service) = PubSubService::new(
1287 config.pubsub_config.clone(),
1288 &rpc_subscriptions,
1289 rpc_pubsub_addr,
1290 );
1291 config
1292 .validator_exit
1293 .write()
1294 .unwrap()
1295 .register_exit(Box::new(move || trigger.cancel()));
1296
1297 Some(pubsub_service)
1298 };
1299
1300 let (completed_data_sets_sender, completed_data_sets_service) =
1301 if !config.rpc_config.full_api {
1302 (None, None)
1303 } else {
1304 let (completed_data_sets_sender, completed_data_sets_receiver) =
1305 bounded(MAX_COMPLETED_DATA_SETS_IN_CHANNEL);
1306 let completed_data_sets_service = CompletedDataSetsService::new(
1307 completed_data_sets_receiver,
1308 blockstore.clone(),
1309 rpc_subscriptions.clone(),
1310 exit.clone(),
1311 max_slots.clone(),
1312 );
1313 (
1314 Some(completed_data_sets_sender),
1315 Some(completed_data_sets_service),
1316 )
1317 };
1318
1319 let rpc_completed_slots_service =
1320 if config.rpc_config.full_api || geyser_plugin_service.is_some() {
1321 let (completed_slots_sender, completed_slots_receiver) =
1322 bounded(MAX_COMPLETED_SLOTS_IN_CHANNEL);
1323 blockstore.add_completed_slots_signal(completed_slots_sender);
1324
1325 Some(RpcCompletedSlotsService::spawn(
1326 completed_slots_receiver,
1327 rpc_subscriptions.clone(),
1328 slot_status_notifier.clone(),
1329 exit.clone(),
1330 ))
1331 } else {
1332 None
1333 };
1334
1335 let dependency_tracker = transaction_status_sender
1336 .is_some()
1337 .then_some(dependency_tracker);
1338 let optimistically_confirmed_bank_tracker =
1339 Some(OptimisticallyConfirmedBankTracker::new(
1340 bank_notification_receiver,
1341 exit.clone(),
1342 bank_forks.clone(),
1343 optimistically_confirmed_bank,
1344 rpc_subscriptions.clone(),
1345 confirmed_bank_subscribers,
1346 prioritization_fee_cache.clone(),
1347 dependency_tracker.clone(),
1348 ));
1349 let bank_notification_sender_config = Some(BankNotificationSenderConfig {
1350 sender: bank_notification_sender,
1351 should_send_parents: geyser_plugin_service.is_some(),
1352 dependency_tracker,
1353 });
1354 (
1355 Some(json_rpc_service),
1356 Some(rpc_subscriptions),
1357 pubsub_service,
1358 completed_data_sets_sender,
1359 completed_data_sets_service,
1360 rpc_completed_slots_service,
1361 optimistically_confirmed_bank_tracker,
1362 bank_notification_sender_config,
1363 )
1364 } else {
1365 (None, None, None, None, None, None, None, None)
1366 };
1367
1368 if config.halt_at_slot.is_some() {
1369 block_commitment_cache
1372 .write()
1373 .unwrap()
1374 .set_highest_super_majority_root(bank_forks.read().unwrap().root());
1375
1376 warn!("Validator halted");
1378 *start_progress.write().unwrap() = ValidatorStartProgress::Halted;
1379 std::thread::park();
1380 }
1381 let ip_echo_server = match node.sockets.ip_echo {
1382 None => None,
1383 Some(tcp_listener) => Some(solana_net_utils::ip_echo_server(
1384 tcp_listener,
1385 config.ip_echo_server_threads,
1386 Some(node.info.shred_version()),
1387 )),
1388 };
1389
1390 let (stats_reporter_sender, stats_reporter_receiver) = unbounded();
1391
1392 let stats_reporter_service =
1393 StatsReporterService::new(stats_reporter_receiver, exit.clone());
1394
1395 let gossip_service = GossipService::new(
1396 &cluster_info,
1397 Some(bank_forks.clone()),
1398 node.sockets.gossip.clone(),
1399 config.gossip_validators.clone(),
1400 should_check_duplicate_instance,
1401 Some(stats_reporter_sender.clone()),
1402 exit.clone(),
1403 );
1404 let serve_repair = config.repair_handler_type.create_serve_repair(
1405 blockstore.clone(),
1406 cluster_info.clone(),
1407 bank_forks.read().unwrap().sharable_banks(),
1408 config.repair_whitelist.clone(),
1409 );
1410 let (repair_request_quic_sender, repair_request_quic_receiver) = unbounded();
1411 let (repair_response_quic_sender, repair_response_quic_receiver) = unbounded();
1412 let (ancestor_hashes_response_quic_sender, ancestor_hashes_response_quic_receiver) =
1413 unbounded();
1414
1415 let waited_for_supermajority = wait_for_supermajority(
1416 config,
1417 Some(&mut process_blockstore),
1418 &bank_forks,
1419 &cluster_info,
1420 rpc_override_health_check,
1421 &start_progress,
1422 )?;
1423
1424 let blockstore_metric_report_service =
1425 BlockstoreMetricReportService::new(blockstore.clone(), exit.clone());
1426
1427 let wait_for_vote_to_start_leader =
1428 !waited_for_supermajority && !config.no_wait_for_vote_to_start_leader;
1429
1430 let poh_service = PohService::new(
1431 poh_recorder.clone(),
1432 &genesis_config.poh_config,
1433 exit.clone(),
1434 bank_forks.read().unwrap().root_bank().ticks_per_slot(),
1435 config.poh_pinned_cpu_core,
1436 config.poh_hashes_per_batch,
1437 record_receiver,
1438 poh_service_message_receiver,
1439 );
1440 assert_eq!(
1441 blockstore.get_new_shred_signals_len(),
1442 1,
1443 "New shred signal for the TVU should be the same as the clear bank signal."
1444 );
1445
1446 let vote_tracker = Arc::<VoteTracker>::default();
1447
1448 let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
1449 let (verified_vote_sender, verified_vote_receiver) = unbounded();
1450 let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded();
1451 let (duplicate_confirmed_slot_sender, duplicate_confirmed_slots_receiver) = unbounded();
1452
1453 let entry_notification_sender = entry_notifier_service
1454 .as_ref()
1455 .map(|service| service.sender_cloned());
1456
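        // Turbine and repair QUIC endpoints are only created off MainnetBeta
        // (and only get dedicated Tokio runtimes when no ambient runtime is
        // available); on MainnetBeta, dummy senders are used instead.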
1457 let turbine_quic_endpoint_runtime = (current_runtime_handle.is_err()
1458 && genesis_config.cluster_type != ClusterType::MainnetBeta)
1459 .then(|| {
1460 tokio::runtime::Builder::new_multi_thread()
1461 .enable_all()
1462 .thread_name("solTurbineQuic")
1463 .build()
1464 .unwrap()
1465 });
1466 let (turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver) = unbounded();
1467 let (
1468 turbine_quic_endpoint,
1469 turbine_quic_endpoint_sender,
1470 turbine_quic_endpoint_join_handle,
1471 ) = if genesis_config.cluster_type == ClusterType::MainnetBeta {
1472 let (sender, _receiver) = tokio::sync::mpsc::channel(1);
1473 (None, sender, None)
1474 } else {
1475 solana_turbine::quic_endpoint::new_quic_endpoint(
1476 turbine_quic_endpoint_runtime
1477 .as_ref()
1478 .map(TokioRuntime::handle)
1479 .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()),
1480 &identity_keypair,
1481 node.sockets.tvu_quic,
1482 turbine_quic_endpoint_sender,
1483 bank_forks.clone(),
1484 )
1485 .map(|(endpoint, sender, join_handle)| (Some(endpoint), sender, Some(join_handle)))
1486 .unwrap()
1487 };
1488
1489 let repair_quic_endpoints_runtime = (current_runtime_handle.is_err()
1491 && genesis_config.cluster_type != ClusterType::MainnetBeta)
1492 .then(|| {
1493 tokio::runtime::Builder::new_multi_thread()
1494 .enable_all()
1495 .thread_name("solRepairQuic")
1496 .build()
1497 .unwrap()
1498 });
1499 let (repair_quic_endpoints, repair_quic_async_senders, repair_quic_endpoints_join_handle) =
1500 if genesis_config.cluster_type == ClusterType::MainnetBeta {
1501 (None, RepairQuicAsyncSenders::new_dummy(), None)
1502 } else {
1503 let repair_quic_sockets = RepairQuicSockets {
1504 repair_server_quic_socket: node.sockets.serve_repair_quic,
1505 repair_client_quic_socket: node.sockets.repair_quic,
1506 ancestor_hashes_quic_socket: node.sockets.ancestor_hashes_requests_quic,
1507 };
1508 let repair_quic_senders = RepairQuicSenders {
1509 repair_request_quic_sender: repair_request_quic_sender.clone(),
1510 repair_response_quic_sender,
1511 ancestor_hashes_response_quic_sender,
1512 };
1513 repair::quic_endpoint::new_quic_endpoints(
1514 repair_quic_endpoints_runtime
1515 .as_ref()
1516 .map(TokioRuntime::handle)
1517 .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()),
1518 &identity_keypair,
1519 repair_quic_sockets,
1520 repair_quic_senders,
1521 bank_forks.clone(),
1522 )
1523 .map(|(endpoints, senders, join_handle)| {
1524 (Some(endpoints), senders, Some(join_handle))
1525 })
1526 .unwrap()
1527 };
1528 let serve_repair_service = ServeRepairService::new(
1529 serve_repair,
1530 repair_request_quic_sender,
1533 repair_request_quic_receiver,
1534 repair_quic_async_senders.repair_response_quic_sender,
1535 node.sockets.serve_repair,
1536 socket_addr_space,
1537 stats_reporter_sender,
1538 exit.clone(),
1539 );
1540
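        // wen_restart: when a proto path is configured and we did not already
        // wait for supermajority, run the wen_restart protocol once the TVU is
        // up and then return `ValidatorError::WenRestartFinished` so the node
        // can be restarted.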
1541 let in_wen_restart = config.wen_restart_proto_path.is_some() && !waited_for_supermajority;
1542 let wen_restart_repair_slots = if in_wen_restart {
1543 Some(Arc::new(RwLock::new(Vec::new())))
1544 } else {
1545 None
1546 };
1547 let tower = match process_blockstore.process_to_create_tower() {
1548 Ok(tower) => {
1549 info!("Tower state: {tower:?}");
1550 tower
1551 }
1552 Err(e) => {
                warn!("Unable to retrieve tower: {e:?}; creating default tower...");
1554 Tower::default()
1555 }
1556 };
1557 let last_vote = tower.last_vote();
1558
1559 let outstanding_repair_requests =
1560 Arc::<RwLock<repair::repair_service::OutstandingShredRepairs>>::default();
1561 let root_bank = bank_forks.read().unwrap().root_bank();
1562 let cluster_slots = Arc::new({
1563 crate::cluster_slots_service::cluster_slots::ClusterSlots::new(
1564 &root_bank,
1565 &cluster_info,
1566 )
1567 });
1568
1569 let connection_cache_for_warmup =
1571 if json_rpc_service.is_some() && connection_cache.is_some() {
1572 connection_cache.as_ref()
1573 } else {
1574 None
1575 };
1576 let (xdp_retransmitter, xdp_sender) =
1577 if let Some(xdp_config) = config.retransmit_xdp.clone() {
1578 let src_port = node.sockets.retransmit_sockets[0]
1579 .local_addr()
1580 .expect("failed to get local address")
1581 .port();
1582 let src_ip = match node.bind_ip_addrs.active() {
1583 IpAddr::V4(ip) if !ip.is_unspecified() => Some(ip),
1584 IpAddr::V4(_unspecified) => xdp_config
1585 .interface
1586 .as_ref()
1587 .and_then(|iface| master_ip_if_bonded(iface)),
1588 _ => panic!("IPv6 not supported"),
1589 };
1590 let (rtx, sender) = XdpRetransmitter::new(xdp_config, src_port, src_ip)
1591 .expect("failed to create xdp retransmitter");
1592 (Some(rtx), Some(sender))
1593 } else {
1594 (None, None)
1595 };
1596
1597 let alpenglow_socket = if genesis_config.cluster_type == ClusterType::Testnet
1599 || genesis_config.cluster_type == ClusterType::Development
1600 {
1601 node.sockets.alpenglow
1602 } else {
1603 None
1604 };
1605
1606 let tvu = Tvu::new(
1607 vote_account,
1608 authorized_voter_keypairs,
1609 &bank_forks,
1610 &cluster_info,
1611 TvuSockets {
1612 repair: node.sockets.repair.try_clone().unwrap(),
1613 retransmit: node.sockets.retransmit_sockets,
1614 fetch: node.sockets.tvu,
1615 ancestor_hashes_requests: node.sockets.ancestor_hashes_requests,
1616 alpenglow: alpenglow_socket,
1617 },
1618 blockstore.clone(),
1619 ledger_signal_receiver,
1620 rpc_subscriptions.clone(),
1621 &poh_recorder,
1622 poh_controller,
1623 tower,
1624 config.tower_storage.clone(),
1625 &leader_schedule_cache,
1626 exit.clone(),
1627 block_commitment_cache,
1628 config.turbine_disabled.clone(),
1629 transaction_status_sender.clone(),
1630 entry_notification_sender.clone(),
1631 vote_tracker.clone(),
1632 retransmit_slots_sender,
1633 gossip_verified_vote_hash_receiver,
1634 verified_vote_receiver,
1635 replay_vote_sender.clone(),
1636 completed_data_sets_sender,
1637 bank_notification_sender.clone(),
1638 duplicate_confirmed_slots_receiver,
1639 TvuConfig {
1640 max_ledger_shreds: config.max_ledger_shreds,
1641 shred_version: node.info.shred_version(),
1642 repair_validators: config.repair_validators.clone(),
1643 repair_whitelist: config.repair_whitelist.clone(),
1644 wait_for_vote_to_start_leader,
1645 replay_forks_threads: config.replay_forks_threads,
1646 replay_transactions_threads: config.replay_transactions_threads,
1647 shred_sigverify_threads: config.tvu_shred_sigverify_threads,
1648 xdp_sender: xdp_sender.clone(),
1649 },
1650 &max_slots,
1651 block_metadata_notifier,
1652 config.wait_to_vote_slot,
1653 Some(snapshot_controller.clone()),
1654 config.runtime_config.log_messages_bytes_limit,
1655 connection_cache_for_warmup,
1656 &prioritization_fee_cache,
1657 banking_tracer.clone(),
1658 turbine_quic_endpoint_sender.clone(),
1659 turbine_quic_endpoint_receiver,
1660 repair_response_quic_receiver,
1661 repair_quic_async_senders.repair_request_quic_sender,
1662 repair_quic_async_senders.ancestor_hashes_request_quic_sender,
1663 ancestor_hashes_response_quic_receiver,
1664 outstanding_repair_requests.clone(),
1665 cluster_slots.clone(),
1666 wen_restart_repair_slots.clone(),
1667 slot_status_notifier,
1668 vote_connection_cache,
1669 )
1670 .map_err(ValidatorError::Other)?;
1671
1672 if in_wen_restart {
1673 info!("Waiting for wen_restart to finish");
1674 wait_for_wen_restart(WenRestartConfig {
1675 wen_restart_path: config.wen_restart_proto_path.clone().unwrap(),
1676 wen_restart_coordinator: config.wen_restart_coordinator.unwrap(),
1677 last_vote,
1678 blockstore: blockstore.clone(),
1679 cluster_info: cluster_info.clone(),
1680 bank_forks: bank_forks.clone(),
1681 wen_restart_repair_slots: wen_restart_repair_slots.clone(),
1682 wait_for_supermajority_threshold_percent:
1683 WAIT_FOR_WEN_RESTART_SUPERMAJORITY_THRESHOLD_PERCENT,
1684 snapshot_controller: Some(snapshot_controller.clone()),
1685 abs_status: accounts_background_service.status().clone(),
1686 genesis_config_hash: genesis_config.hash(),
1687 exit: exit.clone(),
1688 })?;
1689 return Err(ValidatorError::WenRestartFinished.into());
1690 }
1691
1692 let key_notifiers = Arc::new(RwLock::new(KeyUpdaters::default()));
1693 let forwarding_tpu_client = if let Some(connection_cache) = &connection_cache {
1694 ForwardingClientOption::ConnectionCache(connection_cache.clone())
1695 } else {
1696 let runtime_handle = tpu_client_next_runtime
1697 .as_ref()
1698 .map(TokioRuntime::handle)
1699 .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap());
1700 ForwardingClientOption::TpuClientNext((
1701 Arc::as_ref(&identity_keypair),
1702 tpu_transactions_forwards_client_sockets.take().unwrap(),
1703 runtime_handle.clone(),
1704 cancel.clone(),
1705 node_multihoming.clone(),
1706 ))
1707 };
1708 let tpu = Tpu::new_with_client(
1709 &cluster_info,
1710 &poh_recorder,
1711 transaction_recorder,
1712 entry_receiver,
1713 retransmit_slots_receiver,
1714 TpuSockets {
1715 transactions: node.sockets.tpu,
1716 transaction_forwards: node.sockets.tpu_forwards,
1717 vote: node.sockets.tpu_vote,
1718 broadcast: node.sockets.broadcast,
1719 transactions_quic: node.sockets.tpu_quic,
1720 transactions_forwards_quic: node.sockets.tpu_forwards_quic,
1721 vote_quic: node.sockets.tpu_vote_quic,
1722 vote_forwarding_client: node.sockets.tpu_vote_forwarding_client,
1723 vortexor_receivers: node.sockets.vortexor_receivers,
1724 },
1725 rpc_subscriptions.clone(),
1726 transaction_status_sender,
1727 entry_notification_sender,
1728 blockstore.clone(),
1729 &config.broadcast_stage_type,
1730 xdp_sender,
1731 exit,
1732 node.info.shred_version(),
1733 vote_tracker,
1734 bank_forks.clone(),
1735 verified_vote_sender,
1736 gossip_verified_vote_hash_sender,
1737 replay_vote_receiver,
1738 replay_vote_sender,
1739 bank_notification_sender,
1740 duplicate_confirmed_slot_sender,
1741 forwarding_tpu_client,
1742 turbine_quic_endpoint_sender,
1743 &identity_keypair,
1744 config.runtime_config.log_messages_bytes_limit,
1745 &staked_nodes,
1746 config.staked_nodes_overrides.clone(),
1747 banking_tracer_channels,
1748 tracer_thread,
1749 tpu_enable_udp,
1750 tpu_quic_server_config,
1751 tpu_fwd_quic_server_config,
1752 vote_quic_server_config,
1753 &prioritization_fee_cache,
1754 config.block_production_method.clone(),
1755 config.block_production_num_workers,
1756 config.block_production_scheduler_config.clone(),
1757 config.enable_block_production_forwarding,
1758 config.generator_config.clone(),
1759 key_notifiers.clone(),
1760 cancel,
1761 );
1762
1763 datapoint_info!(
1764 "validator-new",
1765 ("id", id.to_string(), String),
1766 ("version", solana_version::version!(), String),
1767 ("cluster_type", genesis_config.cluster_type as u32, i64),
1768 ("elapsed_ms", start_time.elapsed().as_millis() as i64, i64),
1769 ("waited_for_supermajority", waited_for_supermajority, bool),
1770 ("shred_version", shred_version as i64, i64),
1771 );
1772
1773 *start_progress.write().unwrap() = ValidatorStartProgress::Running;
1774 if config.use_tpu_client_next {
1775 if let Some(json_rpc_service) = &json_rpc_service {
1776 key_notifiers.write().unwrap().add(
1777 KeyUpdaterType::RpcService,
1778 json_rpc_service.get_client_key_updater(),
1779 );
1780 }
1781 }
1784
1785 *admin_rpc_service_post_init.write().unwrap() = Some(AdminRpcRequestMetadataPostInit {
1786 bank_forks: bank_forks.clone(),
1787 cluster_info: cluster_info.clone(),
1788 vote_account: *vote_account,
1789 repair_whitelist: config.repair_whitelist.clone(),
1790 notifies: key_notifiers,
1791 repair_socket: Arc::new(node.sockets.repair),
1792 outstanding_repair_requests,
1793 cluster_slots,
1794 node: Some(node_multihoming),
1795 banking_stage: tpu.banking_stage(),
1796 });
1797
1798 Ok(Self {
1799 stats_reporter_service,
1800 gossip_service,
1801 serve_repair_service,
1802 json_rpc_service,
1803 pubsub_service,
1804 rpc_completed_slots_service,
1805 optimistically_confirmed_bank_tracker,
1806 transaction_status_service,
1807 entry_notifier_service,
1808 system_monitor_service,
1809 sample_performance_service,
1810 snapshot_packager_service,
1811 completed_data_sets_service,
1812 tpu,
1813 tvu,
1814 poh_service,
1815 poh_recorder,
1816 ip_echo_server,
1817 validator_exit: config.validator_exit.clone(),
1818 cluster_info,
1819 bank_forks,
1820 blockstore,
1821 geyser_plugin_service,
1822 blockstore_metric_report_service,
1823 accounts_background_service,
1824 turbine_quic_endpoint,
1825 turbine_quic_endpoint_runtime,
1826 turbine_quic_endpoint_join_handle,
1827 repair_quic_endpoints,
1828 repair_quic_endpoints_runtime,
1829 repair_quic_endpoints_join_handle,
1830 xdp_retransmitter,
1831 _tpu_client_next_runtime: tpu_client_next_runtime,
1832 })
1833 }
1834
1835 pub fn exit(&mut self) {
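// Run all exit callbacks registered on validator_exit so services begin shutting down.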
1837 self.validator_exit.write().unwrap().exit();
1838
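// Drop the blockstore's new-shred and completed-slot signal senders so any threads blocked
// on them disconnect and can observe the exit.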
1839 self.blockstore.drop_signal();
1841 }
1842
1843 pub fn close(mut self) {
1844 self.exit();
1845 self.join();
1846 }
1847
1848 fn print_node_info(node: &Node) {
1849 info!("{:?}", node.info);
1850 info!(
1851 "local gossip address: {}",
1852 node.sockets.gossip[0].local_addr().unwrap()
1853 );
1854 info!(
1855 "local broadcast address: {}",
1856 node.sockets
1857 .broadcast
1858 .first()
1859 .unwrap()
1860 .local_addr()
1861 .unwrap()
1862 );
1863 info!(
1864 "local repair address: {}",
1865 node.sockets.repair.local_addr().unwrap()
1866 );
1867 info!(
1868 "local retransmit address: {}",
1869 node.sockets.retransmit_sockets[0].local_addr().unwrap()
1870 );
1871 }
1872
1873 pub fn join(self) {
1874 drop(self.bank_forks);
1875 drop(self.cluster_info);
1876
1877 self.poh_service.join().expect("poh_service");
1878 drop(self.poh_recorder);
1879
1880 if let Some(json_rpc_service) = self.json_rpc_service {
1881 json_rpc_service.join().expect("rpc_service");
1882 }
1883
1884 if let Some(pubsub_service) = self.pubsub_service {
1885 pubsub_service.join().expect("pubsub_service");
1886 }
1887
1888 if let Some(rpc_completed_slots_service) = self.rpc_completed_slots_service {
1889 rpc_completed_slots_service
1890 .join()
1891 .expect("rpc_completed_slots_service");
1892 }
1893
1894 if let Some(optimistically_confirmed_bank_tracker) =
1895 self.optimistically_confirmed_bank_tracker
1896 {
1897 optimistically_confirmed_bank_tracker
1898 .join()
1899 .expect("optimistically_confirmed_bank_tracker");
1900 }
1901
1902 if let Some(transaction_status_service) = self.transaction_status_service {
1903 transaction_status_service
1904 .join()
1905 .expect("transaction_status_service");
1906 }
1907
1908 if let Some(system_monitor_service) = self.system_monitor_service {
1909 system_monitor_service
1910 .join()
1911 .expect("system_monitor_service");
1912 }
1913
1914 if let Some(sample_performance_service) = self.sample_performance_service {
1915 sample_performance_service
1916 .join()
1917 .expect("sample_performance_service");
1918 }
1919
1920 if let Some(entry_notifier_service) = self.entry_notifier_service {
1921 entry_notifier_service
1922 .join()
1923 .expect("entry_notifier_service");
1924 }
1925
1926 if let Some(s) = self.snapshot_packager_service {
1927 s.join().expect("snapshot_packager_service");
1928 }
1929
1930 self.gossip_service.join().expect("gossip_service");
1931 self.repair_quic_endpoints
1932 .iter()
1933 .flatten()
1934 .for_each(repair::quic_endpoint::close_quic_endpoint);
1935 self.serve_repair_service
1936 .join()
1937 .expect("serve_repair_service");
1938 if let Some(repair_quic_endpoints_join_handle) = self.repair_quic_endpoints_join_handle {
1939 self.repair_quic_endpoints_runtime
1940 .map(|runtime| runtime.block_on(repair_quic_endpoints_join_handle))
1941 .transpose()
1942 .unwrap();
1943 }
1944 self.stats_reporter_service
1945 .join()
1946 .expect("stats_reporter_service");
1947 self.blockstore_metric_report_service
1948 .join()
1949 .expect("blockstore_metric_report_service");
1950 self.accounts_background_service
1951 .join()
1952 .expect("accounts_background_service");
1953 if let Some(turbine_quic_endpoint) = &self.turbine_quic_endpoint {
1954 solana_turbine::quic_endpoint::close_quic_endpoint(turbine_quic_endpoint);
1955 }
1956 if let Some(xdp_retransmitter) = self.xdp_retransmitter {
1957 xdp_retransmitter.join().expect("xdp_retransmitter");
1958 }
1959 self.tpu.join().expect("tpu");
1960 self.tvu.join().expect("tvu");
1961 if let Some(turbine_quic_endpoint_join_handle) = self.turbine_quic_endpoint_join_handle {
1962 self.turbine_quic_endpoint_runtime
1963 .map(|runtime| runtime.block_on(turbine_quic_endpoint_join_handle))
1964 .transpose()
1965 .unwrap();
1966 }
1967 if let Some(completed_data_sets_service) = self.completed_data_sets_service {
1968 completed_data_sets_service
1969 .join()
1970 .expect("completed_data_sets_service");
1971 }
1972 if let Some(ip_echo_server) = self.ip_echo_server {
1973 ip_echo_server.shutdown_background();
1974 }
1975
1976 if let Some(geyser_plugin_service) = self.geyser_plugin_service {
1977 geyser_plugin_service.join().expect("geyser_plugin_service");
1978 }
1979 }
1980}
1981
1982fn active_vote_account_exists_in_bank(bank: &Bank, vote_account: &Pubkey) -> bool {
1983 if let Some(account) = &bank.get_account(vote_account) {
1984 if let Ok(vote_state) = VoteStateV4::deserialize(account.data(), vote_account) {
1985 return !vote_state.votes.is_empty();
1986 }
1987 }
1988 false
1989}
1990
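/// Checks that this machine can hash fast enough to keep up with PoH.
///
/// Hashes `maybe_hash_samples` samples (one slot's worth by default) and compares the measured
/// rate against the rate implied by the bank's `hashes_per_tick` and target slot duration;
/// returns `ValidatorError::PohTooSlow` if the node falls short. Skipped when the bank has no
/// `hashes_per_tick` setting.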
1991fn check_poh_speed(bank: &Bank, maybe_hash_samples: Option<u64>) -> Result<(), ValidatorError> {
1992 let Some(hashes_per_tick) = bank.hashes_per_tick() else {
1993 warn!("Unable to read hashes per tick from Bank, skipping PoH speed check");
1994 return Ok(());
1995 };
1996
1997 let ticks_per_slot = bank.ticks_per_slot();
1998 let hashes_per_slot = hashes_per_tick * ticks_per_slot;
1999 let hash_samples = maybe_hash_samples.unwrap_or(hashes_per_slot);
2000
2001 let hash_time = compute_hash_time(hash_samples);
2002 let my_hashes_per_second = (hash_samples as f64 / hash_time.as_secs_f64()) as u64;
2003
2004 let target_slot_duration = Duration::from_nanos(bank.ns_per_slot as u64);
2005 let target_hashes_per_second =
2006 (hashes_per_slot as f64 / target_slot_duration.as_secs_f64()) as u64;
2007
2008 info!(
2009 "PoH speed check: computed hashes per second {my_hashes_per_second}, target hashes per \
2010 second {target_hashes_per_second}"
2011 );
2012 if my_hashes_per_second < target_hashes_per_second {
2013 return Err(ValidatorError::PohTooSlow {
2014 mine: my_hashes_per_second,
2015 target: target_hashes_per_second,
2016 });
2017 }
2018
2019 Ok(())
2020}
2021
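/// Returns the restart slot if this looks like a cluster restart with a hard fork, i.e. when
/// `--wait-for-supermajority` is set to exactly the current root slot.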
2022fn maybe_cluster_restart_with_hard_fork(config: &ValidatorConfig, root_slot: Slot) -> Option<Slot> {
2023 if let Some(wait_slot_for_supermajority) = config.wait_for_supermajority {
2025 if wait_slot_for_supermajority == root_slot {
2026 return Some(wait_slot_for_supermajority);
2027 }
2028 }
2029
2030 None
2031}
2032
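/// Validates a tower restored from storage against the current bank forks and config.
///
/// Lockouts are adjusted for slots replayed since the tower was saved; the tower is discarded
/// and rebuilt from the bank forks on a hard fork or warp slot, or when restoration failed.
/// Errors if a mandatory tower restore (`--require-tower`) fails while the vote account
/// already contains votes.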
2033fn post_process_restored_tower(
2034 restored_tower: crate::consensus::Result<Tower>,
2035 validator_identity: &Pubkey,
2036 vote_account: &Pubkey,
2037 config: &ValidatorConfig,
2038 bank_forks: &BankForks,
2039) -> Result<Tower, String> {
2040 let mut should_require_tower = config.require_tower;
2041
2042 let restored_tower = restored_tower.and_then(|tower| {
2043 let root_bank = bank_forks.root_bank();
2044 let slot_history = root_bank.get_slot_history();
2045 let tower = tower.adjust_lockouts_after_replay(root_bank.slot(), &slot_history);
2047
2048 if let Some(hard_fork_restart_slot) =
2049 maybe_cluster_restart_with_hard_fork(config, root_bank.slot())
2050 {
2051 let message =
2055 format!("Hard fork detected; discarding tower restoration result: {tower:?}");
2056 datapoint_error!("tower_error", ("error", message, String));
2057 error!("{message}");
2058
2059 should_require_tower = false;
2062 return Err(crate::consensus::TowerError::HardFork(
2063 hard_fork_restart_slot,
2064 ));
2065 }
2066
2067 if let Some(warp_slot) = config.warp_slot {
2068 should_require_tower = false;
2071 return Err(crate::consensus::TowerError::HardFork(warp_slot));
2072 }
2073
2074 tower
2075 });
2076
2077 let restored_tower = match restored_tower {
2078 Ok(tower) => tower,
2079 Err(err) => {
2080 let voting_has_been_active =
2081 active_vote_account_exists_in_bank(&bank_forks.working_bank(), vote_account);
2082 if !err.is_file_missing() {
2083 datapoint_error!(
2084 "tower_error",
2085 ("error", format!("Unable to restore tower: {err}"), String),
2086 );
2087 }
2088 if should_require_tower && voting_has_been_active {
2089 return Err(format!(
2090 "Requested mandatory tower restore failed: {err}. And there is an existing \
2091 vote_account containing actual votes. Aborting due to possible conflicting \
2092 duplicate votes"
2093 ));
2094 }
2095 if err.is_file_missing() && !voting_has_been_active {
2096 info!(
2098 "Ignoring expected failed tower restore because this is the initial validator \
2099 start with the vote account..."
2100 );
2101 } else {
2102 error!(
2103 "Rebuilding a new tower from the latest vote account due to failed tower \
2104 restore: {err}"
2105 );
2106 }
2107
2108 Tower::new_from_bankforks(bank_forks, validator_identity, vote_account)
2109 }
2110 };
2111
2112 Ok(restored_tower)
2113}
2114
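/// Opens the genesis config from the ledger path, sanity-checks the leader schedule offset,
/// and verifies the genesis hash against `--expected-genesis-hash` when provided.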
2115fn load_genesis(
2116 config: &ValidatorConfig,
2117 ledger_path: &Path,
2118) -> Result<GenesisConfig, ValidatorError> {
2119 let genesis_config = open_genesis_config(ledger_path, config.max_genesis_archive_unpacked_size)
2120 .map_err(ValidatorError::OpenGenesisConfig)?;
2121
2122 let leader_schedule_slot_offset = genesis_config.epoch_schedule.leader_schedule_slot_offset;
2125 let slots_per_epoch = genesis_config.epoch_schedule.slots_per_epoch;
2126 let leader_epoch_offset = leader_schedule_slot_offset.div_ceil(slots_per_epoch);
2127 assert!(leader_epoch_offset <= MAX_LEADER_SCHEDULE_EPOCH_OFFSET);
2128
2129 let genesis_hash = genesis_config.hash();
2130 info!("genesis hash: {genesis_hash}");
2131
2132 if let Some(expected_genesis_hash) = config.expected_genesis_hash {
2133 if genesis_hash != expected_genesis_hash {
2134 return Err(ValidatorError::GenesisHashMismatch(
2135 genesis_hash,
2136 expected_genesis_hash,
2137 ));
2138 }
2139 }
2140
2141 Ok(genesis_config)
2142}
2143
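/// Opens the blockstore and loads bank forks from the latest snapshot (or genesis), wiring up
/// the replay wake-up signal, optional transaction/entry notification services, and the leader
/// schedule cache used by the rest of startup.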
2144#[allow(clippy::type_complexity)]
2145fn load_blockstore(
2146 config: &ValidatorConfig,
2147 ledger_path: &Path,
2148 genesis_config: &GenesisConfig,
2149 exit: Arc<AtomicBool>,
2150 start_progress: &Arc<RwLock<ValidatorStartProgress>>,
2151 accounts_update_notifier: Option<AccountsUpdateNotifier>,
2152 transaction_notifier: Option<TransactionNotifierArc>,
2153 entry_notifier: Option<EntryNotifierArc>,
2154 dependency_tracker: Option<Arc<DependencyTracker>>,
2155) -> Result<
2156 (
2157 Arc<RwLock<BankForks>>,
2158 Arc<Blockstore>,
2159 Slot,
2160 Receiver<bool>,
2161 LeaderScheduleCache,
2162 Option<StartingSnapshotHashes>,
2163 TransactionHistoryServices,
2164 blockstore_processor::ProcessOptions,
2165 BlockstoreRootScan,
2166 DroppedSlotsReceiver,
2167 Option<EntryNotifierService>,
2168 ),
2169 String,
2170> {
2171 info!("loading ledger from {ledger_path:?}...");
2172 *start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger;
2173
2174 let blockstore = Blockstore::open_with_options(ledger_path, config.blockstore_options.clone())
2175 .map_err(|err| format!("Failed to open Blockstore: {err:?}"))?;
2176
2177 let (ledger_signal_sender, ledger_signal_receiver) = bounded(MAX_REPLAY_WAKE_UP_SIGNALS);
2178 blockstore.add_new_shred_signal(ledger_signal_sender);
2179
2180 let original_blockstore_root = blockstore.max_root();
2183
2184 let blockstore = Arc::new(blockstore);
2185 let blockstore_root_scan = BlockstoreRootScan::new(config, blockstore.clone(), exit.clone());
2186 let halt_at_slot = config
2187 .halt_at_slot
2188 .or_else(|| blockstore.highest_slot().unwrap_or(None));
2189
2190 let process_options = blockstore_processor::ProcessOptions {
2191 run_verification: config.run_verification,
2192 halt_at_slot,
2193 new_hard_forks: config.new_hard_forks.clone(),
2194 debug_keys: config.debug_keys.clone(),
2195 accounts_db_config: config.accounts_db_config.clone(),
2196 accounts_db_skip_shrink: config.accounts_db_skip_shrink,
2197 accounts_db_force_initial_clean: config.accounts_db_force_initial_clean,
2198 runtime_config: config.runtime_config.clone(),
2199 use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup,
2200 ..blockstore_processor::ProcessOptions::default()
2201 };
2202
2203 let enable_rpc_transaction_history =
2204 config.rpc_addrs.is_some() && config.rpc_config.enable_rpc_transaction_history;
2205 let is_plugin_transaction_history_required = transaction_notifier.is_some();
2206 let transaction_history_services =
2207 if enable_rpc_transaction_history || is_plugin_transaction_history_required {
2208 initialize_rpc_transaction_history_services(
2209 blockstore.clone(),
2210 exit.clone(),
2211 enable_rpc_transaction_history,
2212 config.rpc_config.enable_extended_tx_metadata_storage,
2213 transaction_notifier,
2214 dependency_tracker,
2215 )
2216 } else {
2217 TransactionHistoryServices::default()
2218 };
2219
2220 let entry_notifier_service = entry_notifier
2221 .map(|entry_notifier| EntryNotifierService::new(entry_notifier, exit.clone()));
2222
2223 let (bank_forks, mut leader_schedule_cache, starting_snapshot_hashes) =
2224 bank_forks_utils::load_bank_forks(
2225 genesis_config,
2226 &blockstore,
2227 config.account_paths.clone(),
2228 &config.snapshot_config,
2229 &process_options,
2230 transaction_history_services
2231 .transaction_status_sender
2232 .as_ref(),
2233 entry_notifier_service
2234 .as_ref()
2235 .map(|service| service.sender()),
2236 accounts_update_notifier,
2237 exit,
2238 )
2239 .map_err(|err| err.to_string())?;
2240
2241 let pruned_banks_receiver =
2247 AccountsBackgroundService::setup_bank_drop_callback(bank_forks.clone());
2248
2249 leader_schedule_cache.set_fixed_leader_schedule(config.fixed_leader_schedule.clone());
2250
2251 Ok((
2252 bank_forks,
2253 blockstore,
2254 original_blockstore_root,
2255 ledger_signal_receiver,
2256 leader_schedule_cache,
2257 starting_snapshot_hashes,
2258 transaction_history_services,
2259 process_options,
2260 blockstore_root_scan,
2261 pruned_banks_receiver,
2262 entry_notifier_service,
2263 ))
2264}
2265
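/// Deferred blockstore processing: bundles everything needed to replay the ledger from the
/// loaded root so that replay can be triggered lazily (e.g. right before waiting for
/// supermajority) and the restored tower can be produced afterwards.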
2266pub struct ProcessBlockStore<'a> {
2267 id: &'a Pubkey,
2268 vote_account: &'a Pubkey,
2269 start_progress: &'a Arc<RwLock<ValidatorStartProgress>>,
2270 blockstore: &'a Blockstore,
2271 original_blockstore_root: Slot,
2272 bank_forks: &'a Arc<RwLock<BankForks>>,
2273 leader_schedule_cache: &'a LeaderScheduleCache,
2274 process_options: &'a blockstore_processor::ProcessOptions,
2275 transaction_status_sender: Option<&'a TransactionStatusSender>,
2276 entry_notification_sender: Option<&'a EntryNotifierSender>,
2277 blockstore_root_scan: Option<BlockstoreRootScan>,
2278 snapshot_controller: &'a SnapshotController,
2279 config: &'a ValidatorConfig,
2280 tower: Option<Tower>,
2281}
2282
2283impl<'a> ProcessBlockStore<'a> {
2284 #[allow(clippy::too_many_arguments)]
2285 fn new(
2286 id: &'a Pubkey,
2287 vote_account: &'a Pubkey,
2288 start_progress: &'a Arc<RwLock<ValidatorStartProgress>>,
2289 blockstore: &'a Blockstore,
2290 original_blockstore_root: Slot,
2291 bank_forks: &'a Arc<RwLock<BankForks>>,
2292 leader_schedule_cache: &'a LeaderScheduleCache,
2293 process_options: &'a blockstore_processor::ProcessOptions,
2294 transaction_status_sender: Option<&'a TransactionStatusSender>,
2295 entry_notification_sender: Option<&'a EntryNotifierSender>,
2296 blockstore_root_scan: BlockstoreRootScan,
2297 snapshot_controller: &'a SnapshotController,
2298 config: &'a ValidatorConfig,
2299 ) -> Self {
2300 Self {
2301 id,
2302 vote_account,
2303 start_progress,
2304 blockstore,
2305 original_blockstore_root,
2306 bank_forks,
2307 leader_schedule_cache,
2308 process_options,
2309 transaction_status_sender,
2310 entry_notification_sender,
2311 blockstore_root_scan: Some(blockstore_root_scan),
2312 snapshot_controller,
2313 config,
2314 tower: None,
2315 }
2316 }
2317
2318 pub(crate) fn process(&mut self) -> Result<(), String> {
2319 if self.tower.is_none() {
2320 let previous_start_progress = *self.start_progress.read().unwrap();
2321 *self.start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger;
2322
2323 let exit = Arc::new(AtomicBool::new(false));
2324 if let Ok(Some(max_slot)) = self.blockstore.highest_slot() {
2325 let bank_forks = self.bank_forks.clone();
2326 let exit = exit.clone();
2327 let start_progress = self.start_progress.clone();
2328
2329 let _ = Builder::new()
2330 .name("solRptLdgrStat".to_string())
2331 .spawn(move || {
2332 while !exit.load(Ordering::Relaxed) {
2333 let slot = bank_forks.read().unwrap().working_bank().slot();
2334 *start_progress.write().unwrap() =
2335 ValidatorStartProgress::ProcessingLedger { slot, max_slot };
2336 sleep(Duration::from_secs(2));
2337 }
2338 })
2339 .unwrap();
2340 }
2341 blockstore_processor::process_blockstore_from_root(
2342 self.blockstore,
2343 self.bank_forks,
2344 self.leader_schedule_cache,
2345 self.process_options,
2346 self.transaction_status_sender,
2347 self.entry_notification_sender,
2348 Some(self.snapshot_controller),
2349 )
2350 .map_err(|err| {
2351 exit.store(true, Ordering::Relaxed);
2352 format!("Failed to load ledger: {err:?}")
2353 })?;
2354 exit.store(true, Ordering::Relaxed);
2355
2356 if let Some(blockstore_root_scan) = self.blockstore_root_scan.take() {
2357 blockstore_root_scan.join();
2358 }
2359
2360 self.tower = Some({
2361 let restored_tower = Tower::restore(self.config.tower_storage.as_ref(), self.id);
2362 if let Ok(tower) = &restored_tower {
2363 reconcile_blockstore_roots_with_external_source(
2365 ExternalRootSource::Tower(tower.root()),
2366 self.blockstore,
2367 &mut self.original_blockstore_root,
2368 )
2369 .map_err(|err| format!("Failed to reconcile blockstore with tower: {err:?}"))?;
2370 }
2371
2372 post_process_restored_tower(
2373 restored_tower,
2374 self.id,
2375 self.vote_account,
2376 self.config,
2377 &self.bank_forks.read().unwrap(),
2378 )?
2379 });
2380
2381 if let Some(hard_fork_restart_slot) = maybe_cluster_restart_with_hard_fork(
2382 self.config,
2383 self.bank_forks.read().unwrap().root(),
2384 ) {
2385 reconcile_blockstore_roots_with_external_source(
2388 ExternalRootSource::HardFork(hard_fork_restart_slot),
2389 self.blockstore,
2390 &mut self.original_blockstore_root,
2391 )
2392 .map_err(|err| format!("Failed to reconcile blockstore with hard fork: {err:?}"))?;
2393 }
2394
2395 *self.start_progress.write().unwrap() = previous_start_progress;
2396 }
2397 Ok(())
2398 }
2399
2400 pub(crate) fn process_to_create_tower(mut self) -> Result<Tower, String> {
2401 self.process()?;
2402 Ok(self.tower.unwrap())
2403 }
2404}
2405
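/// If `--warp-slot` is configured, warps the root bank forward to that slot, takes a full
/// snapshot of the warped bank, and re-runs blockstore processing.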
2406fn maybe_warp_slot(
2407 config: &ValidatorConfig,
2408 process_blockstore: &mut ProcessBlockStore,
2409 ledger_path: &Path,
2410 bank_forks: &RwLock<BankForks>,
2411 leader_schedule_cache: &LeaderScheduleCache,
2412 snapshot_controller: &SnapshotController,
2413) -> Result<(), String> {
2414 if let Some(warp_slot) = config.warp_slot {
2415 let mut bank_forks = bank_forks.write().unwrap();
2416
2417 let working_bank = bank_forks.working_bank();
2418
2419 if warp_slot <= working_bank.slot() {
2420 return Err(format!(
2421 "warp slot ({}) cannot be less than the working bank slot ({})",
2422 warp_slot,
2423 working_bank.slot()
2424 ));
2425 }
2426 info!("warping to slot {warp_slot}");
2427
2428 let root_bank = bank_forks.root_bank();
2429
2430 root_bank.squash();
2434 root_bank.force_flush_accounts_cache();
2435
2436 bank_forks.insert(Bank::warp_from_parent(
2437 root_bank,
2438 &Pubkey::default(),
2439 warp_slot,
2440 ));
2441 bank_forks.set_root(warp_slot, Some(snapshot_controller), Some(warp_slot));
2442 leader_schedule_cache.set_root(&bank_forks.root_bank());
2443
2444 let full_snapshot_archive_info = match snapshot_bank_utils::bank_to_full_snapshot_archive(
2445 ledger_path,
2446 &bank_forks.root_bank(),
2447 None,
2448 &config.snapshot_config.full_snapshot_archives_dir,
2449 &config.snapshot_config.incremental_snapshot_archives_dir,
2450 config.snapshot_config.archive_format,
2451 ) {
2452 Ok(archive_info) => archive_info,
2453 Err(e) => return Err(format!("Unable to create snapshot: {e}")),
2454 };
2455 info!(
2456 "created snapshot: {}",
2457 full_snapshot_archive_info.path().display()
2458 );
2459
2460 drop(bank_forks);
2461 process_blockstore.process()?;
2464 }
2465 Ok(())
2466}
2467
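/// Decides whether the blockstore may hold shreds with an outdated shred version and, if so,
/// returns the first slot from which they should be scanned and purged. Triggers on a cluster
/// restart at the current root or on a hard fork that falls inside the range of slots present
/// in the blockstore.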
2468fn should_cleanup_blockstore_incorrect_shred_versions(
2471 config: &ValidatorConfig,
2472 blockstore: &Blockstore,
2473 root_slot: Slot,
2474 hard_forks: &HardForks,
2475) -> Result<Option<Slot>, BlockstoreError> {
2476 let maybe_cluster_restart_slot = maybe_cluster_restart_with_hard_fork(config, root_slot);
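// A cluster restart via --wait-for-supermajority at the current root typically comes with a
// new shred version; clean everything after the restart slot.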
2478 if maybe_cluster_restart_slot.is_some() {
2479 return Ok(Some(root_slot + 1));
2480 }
2481
2482 let Some(latest_hard_fork) = hard_forks.iter().last().map(|(slot, _)| *slot) else {
2484 return Ok(None);
2485 };
2486
2487 let Some(blockstore_max_slot) = blockstore.highest_slot()? else {
2489 return Ok(None);
2490 };
2491 let blockstore_min_slot = blockstore.lowest_slot();
2492 info!(
2493 "Blockstore contains data from slot {blockstore_min_slot} to {blockstore_max_slot}, the \
2494 latest hard fork is {latest_hard_fork}"
2495 );
2496
2497 if latest_hard_fork < blockstore_min_slot {
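// Every slot in the blockstore is newer than the latest hard fork, so nothing from before
// the fork remains to be cleaned.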
2498 Ok(None)
2506 } else if latest_hard_fork < blockstore_max_slot {
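// Slots beyond the hard fork are present and may still carry shreds with the pre-fork
// version; scan from the slot after the fork.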
2507 Ok(Some(latest_hard_fork + 1))
2518 } else {
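// The blockstore ends at or before the hard fork, so there are no post-fork slots to check.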
2519 Ok(None)
2527 }
2528}
2529
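/// Scans data shreds starting at `start_slot` and returns the first shred version found that
/// differs from `expected_shred_version`, giving up after a fixed time budget.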
2530fn scan_blockstore_for_incorrect_shred_version(
2533 blockstore: &Blockstore,
2534 start_slot: Slot,
2535 expected_shred_version: u16,
2536) -> Result<Option<u16>, BlockstoreError> {
2537 const TIMEOUT: Duration = Duration::from_secs(60);
2538 let timer = Instant::now();
2539 let slot_meta_iterator = blockstore.slot_meta_iterator(start_slot)?;
2541
2542 info!("Searching blockstore for shred with incorrect version from slot {start_slot}");
2543 for (slot, _meta) in slot_meta_iterator {
2544 let shreds = blockstore.get_data_shreds_for_slot(slot, 0)?;
2545 for shred in &shreds {
2546 if shred.version() != expected_shred_version {
2547 return Ok(Some(shred.version()));
2548 }
2549 }
2550 if timer.elapsed() > TIMEOUT {
2551 info!("Didn't find incorrect shreds after 60 seconds, aborting");
2552 break;
2553 }
2554 }
2555 Ok(None)
2556}
2557
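/// If any shred at or after `start_slot` carries an unexpected shred version, backs the
/// affected slots up into a sibling blockstore directory (best effort) and then purges them
/// from the primary blockstore.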
2558fn cleanup_blockstore_incorrect_shred_versions(
2561 blockstore: &Blockstore,
2562 config: &ValidatorConfig,
2563 start_slot: Slot,
2564 expected_shred_version: u16,
2565) -> Result<(), BlockstoreError> {
2566 let incorrect_shred_version = scan_blockstore_for_incorrect_shred_version(
2567 blockstore,
2568 start_slot,
2569 expected_shred_version,
2570 )?;
2571 let Some(incorrect_shred_version) = incorrect_shred_version else {
2572 info!("Only shreds with the correct version were found in the blockstore");
2573 return Ok(());
2574 };
2575
2576 let end_slot = blockstore.highest_slot()?.unwrap();
2578
2579 let backup_folder = format!(
2582 "{BLOCKSTORE_DIRECTORY_ROCKS_LEVEL}_backup_{incorrect_shred_version}_{start_slot}_{end_slot}"
2583 );
2584 match Blockstore::open_with_options(
2585 &blockstore.ledger_path().join(backup_folder),
2586 config.blockstore_options.clone(),
2587 ) {
2588 Ok(backup_blockstore) => {
2589 info!("Backing up slots from {start_slot} to {end_slot}");
2590 let mut timer = Measure::start("blockstore backup");
2591
2592 const PRINT_INTERVAL: Duration = Duration::from_secs(5);
2593 let mut print_timer = Instant::now();
2594 let mut num_slots_copied = 0;
2595 let slot_meta_iterator = blockstore.slot_meta_iterator(start_slot)?;
2596 for (slot, _meta) in slot_meta_iterator {
2597 let shreds = blockstore.get_data_shreds_for_slot(slot, 0)?;
2598 let shreds = shreds.into_iter().map(Cow::Owned);
2599 let _ = backup_blockstore.insert_cow_shreds(shreds, None, true);
2600 num_slots_copied += 1;
2601
2602 if print_timer.elapsed() > PRINT_INTERVAL {
2603 info!("Backed up {num_slots_copied} slots thus far");
2604 print_timer = Instant::now();
2605 }
2606 }
2607
2608 timer.stop();
2609 info!("Backing up slots done. {timer}");
2610 }
2611 Err(err) => {
2612 warn!("Unable to backup shreds with incorrect shred version: {err}");
2613 }
2614 }
2615
2616 info!("Purging slots {start_slot} to {end_slot} from blockstore");
2617 let mut timer = Measure::start("blockstore purge");
2618 blockstore.purge_from_next_slots(start_slot, end_slot);
2619 blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact);
2620 timer.stop();
2621 info!("Purging slots done. {timer}");
2622
2623 Ok(())
2624}
2625
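/// Spins up the transaction status service (and its sender) used to persist transaction
/// history for RPC and/or forward it to a geyser transaction notifier.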
2626fn initialize_rpc_transaction_history_services(
2627 blockstore: Arc<Blockstore>,
2628 exit: Arc<AtomicBool>,
2629 enable_rpc_transaction_history: bool,
2630 enable_extended_tx_metadata_storage: bool,
2631 transaction_notifier: Option<TransactionNotifierArc>,
2632 dependency_tracker: Option<Arc<DependencyTracker>>,
2633) -> TransactionHistoryServices {
2634 let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
2635 let (transaction_status_sender, transaction_status_receiver) = unbounded();
2636 let transaction_status_sender = Some(TransactionStatusSender {
2637 sender: transaction_status_sender,
2638 dependency_tracker: dependency_tracker.clone(),
2639 });
2640 let transaction_status_service = Some(TransactionStatusService::new(
2641 transaction_status_receiver,
2642 max_complete_transaction_status_slot.clone(),
2643 enable_rpc_transaction_history,
2644 transaction_notifier,
2645 blockstore.clone(),
2646 enable_extended_tx_metadata_storage,
2647 dependency_tracker,
2648 exit.clone(),
2649 ));
2650
2651 TransactionHistoryServices {
2652 transaction_status_sender,
2653 transaction_status_service,
2654 max_complete_transaction_status_slot,
2655 }
2656}
2657
2658#[derive(Error, Debug)]
2659pub enum ValidatorError {
2660 #[error("bank hash mismatch: actual={0}, expected={1}")]
2661 BankHashMismatch(Hash, Hash),
2662
2663 #[error("blockstore error: {0}")]
2664 Blockstore(#[source] BlockstoreError),
2665
2666 #[error("genesis hash mismatch: actual={0}, expected={1}")]
2667 GenesisHashMismatch(Hash, Hash),
2668
2669 #[error(
2670 "ledger does not have enough data to wait for supermajority: current slot={0}, needed \
2671 slot={1}"
2672 )]
2673 NotEnoughLedgerData(Slot, Slot),
2674
2675 #[error("failed to open genesis: {0}")]
2676 OpenGenesisConfig(#[source] OpenGenesisConfigError),
2677
2678 #[error("{0}")]
2679 Other(String),
2680
2681 #[error(
2682 "PoH hashes/second rate is slower than the cluster target: mine {mine}, cluster {target}"
2683 )]
2684 PohTooSlow { mine: u64, target: u64 },
2685
2686 #[error(transparent)]
2687 ResourceLimitError(#[from] ResourceLimitError),
2688
2689 #[error("shred version mismatch: actual {actual}, expected {expected}")]
2690 ShredVersionMismatch { actual: u16, expected: u16 },
2691
2692 #[error(transparent)]
2693 TraceError(#[from] TraceError),
2694
2695 #[error("Wen Restart finished, please continue with --wait-for-supermajority")]
2696 WenRestartFinished,
2697}
2698
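/// When `--wait-for-supermajority` is configured, replays the ledger up to that slot, verifies
/// the optional expected bank hash, then blocks until at least
/// `WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT` of activated stake is visible in gossip.
/// Returns `Ok(true)` if the wait actually occurred.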
2699fn wait_for_supermajority(
2706 config: &ValidatorConfig,
2707 process_blockstore: Option<&mut ProcessBlockStore>,
2708 bank_forks: &RwLock<BankForks>,
2709 cluster_info: &ClusterInfo,
2710 rpc_override_health_check: Arc<AtomicBool>,
2711 start_progress: &Arc<RwLock<ValidatorStartProgress>>,
2712) -> Result<bool, ValidatorError> {
2713 match config.wait_for_supermajority {
2714 None => Ok(false),
2715 Some(wait_for_supermajority_slot) => {
2716 if let Some(process_blockstore) = process_blockstore {
2717 process_blockstore
2718 .process()
2719 .map_err(ValidatorError::Other)?;
2720 }
2721
2722 let bank = bank_forks.read().unwrap().working_bank();
2723 match wait_for_supermajority_slot.cmp(&bank.slot()) {
2724 std::cmp::Ordering::Less => return Ok(false),
2725 std::cmp::Ordering::Greater => {
2726 return Err(ValidatorError::NotEnoughLedgerData(
2727 bank.slot(),
2728 wait_for_supermajority_slot,
2729 ));
2730 }
2731 _ => {}
2732 }
2733
2734 if let Some(expected_bank_hash) = config.expected_bank_hash {
2735 if bank.hash() != expected_bank_hash {
2736 return Err(ValidatorError::BankHashMismatch(
2737 bank.hash(),
2738 expected_bank_hash,
2739 ));
2740 }
2741 }
2742
2743 for i in 1.. {
2744 let logging = i % 10 == 1;
2745 if logging {
2746 info!(
2747 "Waiting for {}% of activated stake at slot {} to be in gossip...",
2748 WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT,
2749 bank.slot()
2750 );
2751 }
2752
2753 let gossip_stake_percent =
2754 get_stake_percent_in_gossip(&bank, cluster_info, logging);
2755
2756 *start_progress.write().unwrap() =
2757 ValidatorStartProgress::WaitingForSupermajority {
2758 slot: wait_for_supermajority_slot,
2759 gossip_stake_percent,
2760 };
2761
2762 if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT {
2763 info!(
2764 "Supermajority reached, {gossip_stake_percent}% active stake detected, \
2765 starting up now.",
2766 );
2767 break;
2768 }
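// Keep reporting healthy over RPC while intentionally parked here, so the node is not
// treated as failed or pulled out of rotation during a manual restart.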
2769 rpc_override_health_check.store(true, Ordering::Relaxed);
2773 sleep(Duration::new(1, 0));
2774 }
2775 rpc_override_health_check.store(false, Ordering::Relaxed);
2776 Ok(true)
2777 }
2778 }
2779}
2780
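/// Computes the percentage of activated stake whose nodes are currently visible in gossip with
/// a matching shred version, optionally logging a breakdown of offline and
/// wrong-shred-version stake.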
2781fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: bool) -> u64 {
2783 let mut online_stake = 0;
2784 let mut wrong_shred_stake = 0;
2785 let mut wrong_shred_nodes = vec![];
2786 let mut offline_stake = 0;
2787 let mut offline_nodes = vec![];
2788
2789 let mut total_activated_stake = 0;
2790 let now = timestamp();
2791 let peers: HashMap<_, _> = cluster_info
2795 .tvu_peers(ContactInfo::clone)
2796 .into_iter()
2797 .filter(|node| {
2798 let age = now.saturating_sub(node.wallclock());
2799 age < CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS
2801 })
2802 .map(|node| (*node.pubkey(), node))
2803 .collect();
2804 let my_shred_version = cluster_info.my_shred_version();
2805 let my_id = cluster_info.id();
2806
2807 for (activated_stake, vote_account) in bank.vote_accounts().values() {
2808 let activated_stake = *activated_stake;
2809 total_activated_stake += activated_stake;
2810
2811 if activated_stake == 0 {
2812 continue;
2813 }
2814 let vote_state_node_pubkey = *vote_account.node_pubkey();
2815
2816 if let Some(peer) = peers.get(&vote_state_node_pubkey) {
2817 if peer.shred_version() == my_shred_version {
2818 trace!(
2819 "observed {vote_state_node_pubkey} in gossip, \
2820 (activated_stake={activated_stake})"
2821 );
2822 online_stake += activated_stake;
2823 } else {
2824 wrong_shred_stake += activated_stake;
2825 wrong_shred_nodes.push((activated_stake, vote_state_node_pubkey));
2826 }
2827 } else if vote_state_node_pubkey == my_id {
2828 online_stake += activated_stake;
2829 } else {
2830 offline_stake += activated_stake;
2831 offline_nodes.push((activated_stake, vote_state_node_pubkey));
2832 }
2833 }
2834
2835 let online_stake_percentage = (online_stake as f64 / total_activated_stake as f64) * 100.;
2836 if log {
2837 info!("{online_stake_percentage:.3}% of active stake visible in gossip");
2838
2839 if !wrong_shred_nodes.is_empty() {
2840 info!(
2841 "{:.3}% of active stake has the wrong shred version in gossip",
2842 (wrong_shred_stake as f64 / total_activated_stake as f64) * 100.,
2843 );
2844 wrong_shred_nodes.sort_by(|b, a| a.0.cmp(&b.0));
2845 for (stake, identity) in wrong_shred_nodes {
2846 info!(
2847 " {:.3}% - {}",
2848 (stake as f64 / total_activated_stake as f64) * 100.,
2849 identity
2850 );
2851 }
2852 }
2853
2854 if !offline_nodes.is_empty() {
2855 info!(
2856 "{:.3}% of active stake is not visible in gossip",
2857 (offline_stake as f64 / total_activated_stake as f64) * 100.
2858 );
2859 offline_nodes.sort_by(|b, a| a.0.cmp(&b.0));
2860 for (stake, identity) in offline_nodes {
2861 info!(
2862 " {:.3}% - {}",
2863 (stake as f64 / total_activated_stake as f64) * 100.,
2864 identity
2865 );
2866 }
2867 }
2868 }
2869
2870 online_stake_percentage as u64
2871}
2872
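/// Moves aside and asynchronously deletes any leftover contents of the configured accounts and
/// shrink paths from a previous run.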
2873fn cleanup_accounts_paths(config: &ValidatorConfig) {
2874 for account_path in &config.account_paths {
2875 move_and_async_delete_path_contents(account_path);
2876 }
2877 if let Some(shrink_paths) = &config.accounts_db_config.shrink_paths {
2878 for shrink_path in shrink_paths {
2879 move_and_async_delete_path_contents(shrink_path);
2880 }
2881 }
2882}
2883
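/// A snapshot config is valid if snapshot generation is disabled, or if the full snapshot
/// interval is a slot count strictly greater than the incremental snapshot interval
/// (an incremental interval of `Disabled` is always acceptable).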
2884pub fn is_snapshot_config_valid(snapshot_config: &SnapshotConfig) -> bool {
2885 if !snapshot_config.should_generate_snapshots() {
2887 return true;
2888 }
2889
2890 let SnapshotInterval::Slots(full_snapshot_interval_slots) =
2891 snapshot_config.full_snapshot_archive_interval
2892 else {
2893 return false;
2895 };
2896
2897 match snapshot_config.incremental_snapshot_archive_interval {
2898 SnapshotInterval::Disabled => true,
2899 SnapshotInterval::Slots(incremental_snapshot_interval_slots) => {
2900 full_snapshot_interval_slots > incremental_snapshot_interval_slots
2901 }
2902 }
2903}
2904
2905#[cfg(test)]
2906mod tests {
2907 use {
2908 super::*,
2909 crossbeam_channel::{bounded, RecvTimeoutError},
2910 solana_entry::entry,
2911 solana_genesis_config::create_genesis_config,
2912 solana_gossip::contact_info::ContactInfo,
2913 solana_ledger::{
2914 blockstore, create_new_tmp_ledger, genesis_utils::create_genesis_config_with_leader,
2915 get_tmp_ledger_path_auto_delete,
2916 },
2917 solana_poh_config::PohConfig,
2918 solana_sha256_hasher::hash,
2919 solana_tpu_client::tpu_client::DEFAULT_TPU_ENABLE_UDP,
2920 std::{fs::remove_dir_all, num::NonZeroU64, thread, time::Duration},
2921 };
2922
2923 #[test]
2924 fn validator_exit() {
2925 agave_logger::setup();
2926 let leader_keypair = Keypair::new();
2927 let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());
2928
2929 let validator_keypair = Keypair::new();
2930 let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
2931 let genesis_config =
2932 create_genesis_config_with_leader(10_000, &leader_keypair.pubkey(), 1000)
2933 .genesis_config;
2934 let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
2935
2936 let voting_keypair = Arc::new(Keypair::new());
2937 let config = ValidatorConfig {
2938 rpc_addrs: Some((
2939 validator_node.info.rpc().unwrap(),
2940 validator_node.info.rpc_pubsub().unwrap(),
2941 )),
2942 ..ValidatorConfig::default_for_test()
2943 };
2944 let start_progress = Arc::new(RwLock::new(ValidatorStartProgress::default()));
2945 let validator = Validator::new(
2946 validator_node,
2947 Arc::new(validator_keypair),
2948 &validator_ledger_path,
2949 &voting_keypair.pubkey(),
2950 Arc::new(RwLock::new(vec![voting_keypair])),
2951 vec![leader_node.info],
2952 &config,
2953 true, // should_check_duplicate_instance
2954 None, // rpc_to_plugin_manager_receiver
2955 start_progress.clone(),
2956 SocketAddrSpace::Unspecified,
2957 ValidatorTpuConfig::new_for_tests(DEFAULT_TPU_ENABLE_UDP),
2958 Arc::new(RwLock::new(None)),
2959 )
2960 .expect("assume successful validator start");
2961 assert_eq!(
2962 *start_progress.read().unwrap(),
2963 ValidatorStartProgress::Running
2964 );
2965 validator.close();
2966 remove_dir_all(validator_ledger_path).unwrap();
2967 }
2968
2969 #[test]
2970 fn test_should_cleanup_blockstore_incorrect_shred_versions() {
2971 agave_logger::setup();
2972
2973 let ledger_path = get_tmp_ledger_path_auto_delete!();
2974 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
2975
2976 let mut validator_config = ValidatorConfig::default_for_test();
2977 let mut hard_forks = HardForks::default();
2978 let mut root_slot;
2979
2980 root_slot = 10;
2982 validator_config.wait_for_supermajority = Some(root_slot);
2983 assert_eq!(
2984 should_cleanup_blockstore_incorrect_shred_versions(
2985 &validator_config,
2986 &blockstore,
2987 root_slot,
2988 &hard_forks
2989 )
2990 .unwrap(),
2991 Some(root_slot + 1)
2992 );
2993
2994 root_slot = 15;
2997 assert_eq!(
2998 should_cleanup_blockstore_incorrect_shred_versions(
2999 &validator_config,
3000 &blockstore,
3001 root_slot,
3002 &hard_forks
3003 )
3004 .unwrap(),
3005 None,
3006 );
3007
3008 hard_forks.register(10);
3011 assert_eq!(
3012 should_cleanup_blockstore_incorrect_shred_versions(
3013 &validator_config,
3014 &blockstore,
3015 root_slot,
3016 &hard_forks
3017 )
3018 .unwrap(),
3019 None,
3020 );
3021
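// Populate slots 20..35; the hard fork at slot 10 predates all of them, so no cleanup is
// needed.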
3022 let entries = entry::create_ticks(1, 0, Hash::default());
3024 for i in 20..35 {
3025 let shreds = blockstore::entries_to_test_shreds(
3026 &entries,
3027 i,     // slot
3028 i - 1, // parent_slot
3029 true,  // is_full_slot
3030 1,     // version
3031 );
3032 blockstore.insert_shreds(shreds, None, true).unwrap();
3033 }
3034
3035 assert_eq!(
3037 should_cleanup_blockstore_incorrect_shred_versions(
3038 &validator_config,
3039 &blockstore,
3040 root_slot,
3041 &hard_forks
3042 )
3043 .unwrap(),
3044 None,
3045 );
3046
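// A hard fork at the new root slot triggers cleanup from root_slot + 1, with or without
// wait_for_supermajority set.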
3047 root_slot = 25;
3050 hard_forks.register(root_slot);
3051 validator_config.wait_for_supermajority = Some(root_slot);
3052 assert_eq!(
3053 should_cleanup_blockstore_incorrect_shred_versions(
3054 &validator_config,
3055 &blockstore,
3056 root_slot,
3057 &hard_forks
3058 )
3059 .unwrap(),
3060 Some(root_slot + 1),
3061 );
3062 validator_config.wait_for_supermajority = None;
3063 assert_eq!(
3064 should_cleanup_blockstore_incorrect_shred_versions(
3065 &validator_config,
3066 &blockstore,
3067 root_slot,
3068 &hard_forks
3069 )
3070 .unwrap(),
3071 Some(root_slot + 1),
3072 );
3073
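// The root has advanced past the hard fork but post-fork slots remain, so cleanup starts at
// the fork + 1.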
3074 root_slot = 30;
3077 let latest_hard_fork = hard_forks.iter().last().unwrap().0;
3078 assert_eq!(
3079 should_cleanup_blockstore_incorrect_shred_versions(
3080 &validator_config,
3081 &blockstore,
3082 root_slot,
3083 &hard_forks
3084 )
3085 .unwrap(),
3086 Some(latest_hard_fork + 1),
3087 );
3088
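// Once slots up through the hard fork are purged, nothing is left to clean.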
3089 blockstore.purge_slots(0, latest_hard_fork, PurgeType::Exact);
3092 assert_eq!(
3093 should_cleanup_blockstore_incorrect_shred_versions(
3094 &validator_config,
3095 &blockstore,
3096 root_slot,
3097 &hard_forks
3098 )
3099 .unwrap(),
3100 None,
3101 );
3102 }
3103
3104 #[test]
3105 fn test_cleanup_blockstore_incorrect_shred_versions() {
3106 agave_logger::setup();
3107
3108 let validator_config = ValidatorConfig::default_for_test();
3109 let ledger_path = get_tmp_ledger_path_auto_delete!();
3110 let blockstore = Blockstore::open(ledger_path.path()).unwrap();
3111
3112 let entries = entry::create_ticks(1, 0, Hash::default());
3113 for i in 1..10 {
3114 let shreds = blockstore::entries_to_test_shreds(
3115 &entries,
3116 i,     // slot
3117 i - 1, // parent_slot
3118 true,  // is_full_slot
3119 1,     // version
3120 );
3121 blockstore.insert_shreds(shreds, None, true).unwrap();
3122 }
3123
3124 cleanup_blockstore_incorrect_shred_versions(&blockstore, &validator_config, 5, 2).unwrap();
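// All shreds were written with version 1, so scanning for expected version 2 from slot 5
// purges slots 5..10 and unlinks them from slot 4.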
3126 assert!(blockstore.meta(4).unwrap().unwrap().next_slots.is_empty());
3128 for i in 5..10 {
3129 assert!(blockstore
3130 .get_data_shreds_for_slot(i, 0)
3131 .unwrap()
3132 .is_empty());
3133 }
3134 }
3135
3136 #[test]
3137 fn validator_parallel_exit() {
3138 let leader_keypair = Keypair::new();
3139 let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());
3140 let genesis_config =
3141 create_genesis_config_with_leader(10_000, &leader_keypair.pubkey(), 1000)
3142 .genesis_config;
3143
3144 let mut ledger_paths = vec![];
3145 let mut validators: Vec<Validator> = (0..2)
3146 .map(|_| {
3147 let validator_keypair = Keypair::new();
3148 let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
3149 let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
3150 ledger_paths.push(validator_ledger_path.clone());
3151 let vote_account_keypair = Keypair::new();
3152 let config = ValidatorConfig {
3153 rpc_addrs: Some((
3154 validator_node.info.rpc().unwrap(),
3155 validator_node.info.rpc_pubsub().unwrap(),
3156 )),
3157 ..ValidatorConfig::default_for_test()
3158 };
3159 Validator::new(
3160 validator_node,
3161 Arc::new(validator_keypair),
3162 &validator_ledger_path,
3163 &vote_account_keypair.pubkey(),
3164 Arc::new(RwLock::new(vec![Arc::new(vote_account_keypair)])),
3165 vec![leader_node.info.clone()],
3166 &config,
3167 true, // should_check_duplicate_instance
3168 None, // rpc_to_plugin_manager_receiver
3169 Arc::new(RwLock::new(ValidatorStartProgress::default())),
3170 SocketAddrSpace::Unspecified,
3171 ValidatorTpuConfig::new_for_tests(DEFAULT_TPU_ENABLE_UDP),
3172 Arc::new(RwLock::new(None)),
3173 )
3174 .expect("assume successful validator start")
3175 })
3176 .collect();
3177
3178 validators.iter_mut().for_each(|v| v.exit());
3180
3181 let (sender, receiver) = bounded(0);
3183 let _ = thread::spawn(move || {
3184 validators.into_iter().for_each(|validator| {
3185 validator.join();
3186 });
3187 sender.send(()).unwrap();
3188 });
3189
3190 let timeout = Duration::from_secs(60);
3191 if let Err(RecvTimeoutError::Timeout) = receiver.recv_timeout(timeout) {
3192 panic!("timed out waiting for validators to shut down");
3193 }
3194
3195 for path in ledger_paths {
3196 remove_dir_all(path).unwrap();
3197 }
3198 }
3199
3200 #[test]
3201 fn test_wait_for_supermajority() {
3202 agave_logger::setup();
3203 let node_keypair = Arc::new(Keypair::new());
3204 let cluster_info = ClusterInfo::new(
3205 ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
3206 node_keypair,
3207 SocketAddrSpace::Unspecified,
3208 );
3209
3210 let (genesis_config, _mint_keypair) = create_genesis_config(1);
3211 let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config));
3212 let mut config = ValidatorConfig::default_for_test();
3213 let rpc_override_health_check = Arc::new(AtomicBool::new(false));
3214 let start_progress = Arc::new(RwLock::new(ValidatorStartProgress::default()));
3215
3216 assert!(!wait_for_supermajority(
3217 &config,
3218 None,
3219 &bank_forks,
3220 &cluster_info,
3221 rpc_override_health_check.clone(),
3222 &start_progress,
3223 )
3224 .unwrap());
3225
3226 config.wait_for_supermajority = Some(1);
3228 assert!(matches!(
3229 wait_for_supermajority(
3230 &config,
3231 None,
3232 &bank_forks,
3233 &cluster_info,
3234 rpc_override_health_check.clone(),
3235 &start_progress,
3236 ),
3237 Err(ValidatorError::NotEnoughLedgerData(_, _)),
3238 ));
3239
3240 let bank_forks = BankForks::new_rw_arc(Bank::new_from_parent(
3242 bank_forks.read().unwrap().root_bank(),
3243 &Pubkey::default(),
3244 1,
3245 ));
3246 config.wait_for_supermajority = Some(0);
3247 assert!(!wait_for_supermajority(
3248 &config,
3249 None,
3250 &bank_forks,
3251 &cluster_info,
3252 rpc_override_health_check.clone(),
3253 &start_progress,
3254 )
3255 .unwrap());
3256
3257 config.wait_for_supermajority = Some(1);
3259 config.expected_bank_hash = Some(hash(&[1]));
3260 assert!(matches!(
3261 wait_for_supermajority(
3262 &config,
3263 None,
3264 &bank_forks,
3265 &cluster_info,
3266 rpc_override_health_check,
3267 &start_progress,
3268 ),
3269 Err(ValidatorError::BankHashMismatch(_, _)),
3270 ));
3271 }
3272
3273 #[test]
3274 fn test_is_snapshot_config_valid() {
3275 fn new_snapshot_config(
3276 full_snapshot_archive_interval_slots: Slot,
3277 incremental_snapshot_archive_interval_slots: Slot,
3278 ) -> SnapshotConfig {
3279 SnapshotConfig {
3280 full_snapshot_archive_interval: SnapshotInterval::Slots(
3281 NonZeroU64::new(full_snapshot_archive_interval_slots).unwrap(),
3282 ),
3283 incremental_snapshot_archive_interval: SnapshotInterval::Slots(
3284 NonZeroU64::new(incremental_snapshot_archive_interval_slots).unwrap(),
3285 ),
3286 ..SnapshotConfig::default()
3287 }
3288 }
3289
3290 assert!(is_snapshot_config_valid(&SnapshotConfig::default()));
3292
3293 assert!(is_snapshot_config_valid(&SnapshotConfig {
3295 incremental_snapshot_archive_interval: SnapshotInterval::Disabled,
3296 ..SnapshotConfig::default()
3297 }));
3298
3299 assert!(!is_snapshot_config_valid(&SnapshotConfig {
3301 full_snapshot_archive_interval: SnapshotInterval::Disabled,
3302 ..SnapshotConfig::default()
3303 }));
3304
3305 assert!(is_snapshot_config_valid(&new_snapshot_config(400, 200)));
3307 assert!(is_snapshot_config_valid(&new_snapshot_config(100, 42)));
3308 assert!(is_snapshot_config_valid(&new_snapshot_config(444, 200)));
3309 assert!(is_snapshot_config_valid(&new_snapshot_config(400, 222)));
3310
3311 assert!(!is_snapshot_config_valid(&new_snapshot_config(42, 100)));
3313 assert!(!is_snapshot_config_valid(&new_snapshot_config(100, 100)));
3314 assert!(!is_snapshot_config_valid(&new_snapshot_config(100, 200)));
3315
3316 assert!(is_snapshot_config_valid(&SnapshotConfig::new_disabled()));
3318 assert!(is_snapshot_config_valid(&SnapshotConfig::new_load_only()));
3319 assert!(is_snapshot_config_valid(&SnapshotConfig {
3320 full_snapshot_archive_interval: SnapshotInterval::Slots(NonZeroU64::new(37).unwrap()),
3321 incremental_snapshot_archive_interval: SnapshotInterval::Slots(
3322 NonZeroU64::new(41).unwrap()
3323 ),
3324 ..SnapshotConfig::new_load_only()
3325 }));
3326 assert!(is_snapshot_config_valid(&SnapshotConfig {
3327 full_snapshot_archive_interval: SnapshotInterval::Disabled,
3328 incremental_snapshot_archive_interval: SnapshotInterval::Disabled,
3329 ..SnapshotConfig::new_load_only()
3330 }));
3331 }
3332
3333 fn target_tick_duration() -> Duration {
3334 let target_tick_duration_us =
3342 solana_clock::DEFAULT_MS_PER_SLOT * 1000 / solana_clock::DEFAULT_TICKS_PER_SLOT;
3343 assert_eq!(target_tick_duration_us, 6250);
3344 Duration::from_micros(target_tick_duration_us)
3345 }
3346
3347 #[test]
3348 fn test_poh_speed() {
3349 agave_logger::setup();
3350 let poh_config = PohConfig {
3351 target_tick_duration: target_tick_duration(),
3352 hashes_per_tick: Some(100 * solana_clock::DEFAULT_HASHES_PER_TICK),
3354 ..PohConfig::default()
3355 };
3356 let genesis_config = GenesisConfig {
3357 poh_config,
3358 ..GenesisConfig::default()
3359 };
3360 let bank = Bank::new_for_tests(&genesis_config);
3361 assert!(check_poh_speed(&bank, Some(10_000)).is_err());
3362 }
3363
3364 #[test]
3365 fn test_poh_speed_no_hashes_per_tick() {
3366 agave_logger::setup();
3367 let poh_config = PohConfig {
3368 target_tick_duration: target_tick_duration(),
3369 hashes_per_tick: None,
3370 ..PohConfig::default()
3371 };
3372 let genesis_config = GenesisConfig {
3373 poh_config,
3374 ..GenesisConfig::default()
3375 };
3376 let bank = Bank::new_for_tests(&genesis_config);
3377 check_poh_speed(&bank, Some(10_000)).unwrap();
3378 }
3379}