#![forbid(unsafe_code)]

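//! Consensus layer for a snarkOS validator: wraps the BFT instance, buffers inbound
//! unconfirmed solutions and transactions in bounded queues, forwards them to the
//! primary in rate-limited batches, and advances the ledger when the BFT commits a subdag.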
mod transactions_queue;
use transactions_queue::TransactionsQueue;

#[macro_use]
extern crate tracing;

#[cfg(feature = "metrics")]
extern crate snarkos_node_metrics as metrics;

use snarkos_account::Account;
use snarkos_node_bft::{
    BFT,
    MAX_BATCH_DELAY_IN_MS,
    Primary,
    helpers::{
        ConsensusReceiver,
        PrimarySender,
        Storage as NarwhalStorage,
        fmt_id,
        init_consensus_channels,
        init_primary_channels,
    },
    spawn_blocking,
};
use snarkos_node_bft_ledger_service::LedgerService;
use snarkos_node_bft_storage_service::BFTPersistentStorage;
use snarkos_node_sync::{BlockSync, Ping};

use snarkvm::{
    ledger::{
        block::Transaction,
        narwhal::{BatchHeader, Data, Subdag, Transmission, TransmissionID},
        puzzle::{Solution, SolutionID},
    },
    prelude::*,
};

use aleo_std::StorageMode;
use anyhow::Result;
use colored::Colorize;
use indexmap::IndexMap;
#[cfg(feature = "locktick")]
use locktick::parking_lot::{Mutex, RwLock};
use lru::LruCache;
#[cfg(not(feature = "locktick"))]
use parking_lot::{Mutex, RwLock};
use std::{future::Future, net::SocketAddr, num::NonZeroUsize, sync::Arc, time::Duration};
use tokio::{sync::oneshot, task::JoinHandle};

#[cfg(feature = "metrics")]
use std::collections::HashMap;

/// The capacity of the queue reserved for deployment transactions.
const CAPACITY_FOR_DEPLOYMENTS: usize = 1 << 10;
/// The capacity of the queue reserved for execution transactions.
const CAPACITY_FOR_EXECUTIONS: usize = 1 << 10;
/// The capacity of the queue reserved for solutions.
const CAPACITY_FOR_SOLUTIONS: usize = 1 << 10;
/// The maximum number of deployments to dequeue per processing interval.
const MAX_DEPLOYMENTS_PER_INTERVAL: usize = 1;

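/// The consensus handle, which wraps the BFT and buffers unconfirmed solutions and
/// transactions before forwarding them to the primary.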
#[derive(Clone)]
pub struct Consensus<N: Network> {
    /// The ledger service.
    ledger: Arc<dyn LedgerService<N>>,
    /// The BFT instance.
    bft: BFT<N>,
    /// The primary sender channels.
    primary_sender: PrimarySender<N>,
    /// The inbound queue of unconfirmed solutions.
    solutions_queue: Arc<Mutex<LruCache<SolutionID<N>, Solution<N>>>>,
    /// The inbound queue of unconfirmed transactions.
    transactions_queue: Arc<RwLock<TransactionsQueue<N>>>,
    /// The recently-seen unconfirmed solutions.
    seen_solutions: Arc<Mutex<LruCache<SolutionID<N>, ()>>>,
    /// The recently-seen unconfirmed transactions.
    seen_transactions: Arc<Mutex<LruCache<N::TransactionID, ()>>>,
    /// The timestamps of when transmissions entered the queues (metrics only).
    #[cfg(feature = "metrics")]
    transmissions_tracker: Arc<Mutex<HashMap<TransmissionID<N>, i64>>>,
    /// The spawned task handles.
    handles: Arc<Mutex<Vec<JoinHandle<()>>>>,
    /// The ping handle, used to update the node's block locators.
    ping: Arc<Ping<N>>,
    /// The block synchronization state.
    block_sync: Arc<BlockSync<N>>,
}

impl<N: Network> Consensus<N> {
    /// Initializes a new instance of consensus.
    #[allow(clippy::too_many_arguments)]
    pub async fn new(
        account: Account<N>,
        ledger: Arc<dyn LedgerService<N>>,
        block_sync: Arc<BlockSync<N>>,
        ip: Option<SocketAddr>,
        trusted_validators: &[SocketAddr],
        storage_mode: StorageMode,
        ping: Arc<Ping<N>>,
        dev: Option<u16>,
    ) -> Result<Self> {
        // Initialize the primary channels.
        let (primary_sender, primary_receiver) = init_primary_channels::<N>();
        // Initialize the persistent storage for transmissions.
        let transmissions = Arc::new(BFTPersistentStorage::open(storage_mode.clone())?);
        // Initialize the Narwhal storage.
        let storage = NarwhalStorage::new(ledger.clone(), transmissions, BatchHeader::<N>::MAX_GC_ROUNDS as u64);
        // Initialize the BFT.
        let bft =
            BFT::new(account, storage, ledger.clone(), block_sync.clone(), ip, trusted_validators, storage_mode, dev)?;
        // Construct the consensus instance.
        let mut _self = Self {
            ledger,
            bft,
            block_sync,
            primary_sender,
            solutions_queue: Arc::new(Mutex::new(LruCache::new(NonZeroUsize::new(CAPACITY_FOR_SOLUTIONS).unwrap()))),
            transactions_queue: Default::default(),
            seen_solutions: Arc::new(Mutex::new(LruCache::new(NonZeroUsize::new(1 << 16).unwrap()))),
            seen_transactions: Arc::new(Mutex::new(LruCache::new(NonZeroUsize::new(1 << 16).unwrap()))),
            #[cfg(feature = "metrics")]
            transmissions_tracker: Default::default(),
            handles: Default::default(),
            ping: ping.clone(),
        };

        info!("Starting the consensus instance...");

        // Initialize the consensus channels and start the handlers.
        let (consensus_sender, consensus_receiver) = init_consensus_channels();
        _self.start_handlers(consensus_receiver);
        // Run the BFT with the consensus sender and the primary channels.
        _self.bft.run(Some(ping), Some(consensus_sender), _self.primary_sender.clone(), primary_receiver).await?;

        Ok(_self)
    }

    /// Returns the BFT instance.
    pub const fn bft(&self) -> &BFT<N> {
        &self.bft
    }

    /// Returns `true` if the given transaction ID is queued in the memory pool.
    pub fn contains_transaction(&self, transaction_id: &N::TransactionID) -> bool {
        self.transactions_queue.read().contains(transaction_id)
    }
}

impl<N: Network> Consensus<N> {
    /// Returns the number of unconfirmed transmissions.
    pub fn num_unconfirmed_transmissions(&self) -> usize {
        self.bft.num_unconfirmed_transmissions()
    }

    /// Returns the number of unconfirmed ratifications.
    pub fn num_unconfirmed_ratifications(&self) -> usize {
        self.bft.num_unconfirmed_ratifications()
    }

    /// Returns the number of unconfirmed solutions.
    pub fn num_unconfirmed_solutions(&self) -> usize {
        self.bft.num_unconfirmed_solutions()
    }

    /// Returns the number of unconfirmed transactions.
    pub fn num_unconfirmed_transactions(&self) -> usize {
        self.bft.num_unconfirmed_transactions()
    }
}

impl<N: Network> Consensus<N> {
    /// Returns the unconfirmed transmission IDs.
    pub fn unconfirmed_transmission_ids(&self) -> impl '_ + Iterator<Item = TransmissionID<N>> {
        self.worker_transmission_ids().chain(self.inbound_transmission_ids())
    }

    /// Returns the unconfirmed transmissions.
    pub fn unconfirmed_transmissions(&self) -> impl '_ + Iterator<Item = (TransmissionID<N>, Transmission<N>)> {
        self.worker_transmissions().chain(self.inbound_transmissions())
    }

    /// Returns the unconfirmed solutions.
    pub fn unconfirmed_solutions(&self) -> impl '_ + Iterator<Item = (SolutionID<N>, Data<Solution<N>>)> {
        self.worker_solutions().chain(self.inbound_solutions())
    }

    /// Returns the unconfirmed transactions.
    pub fn unconfirmed_transactions(&self) -> impl '_ + Iterator<Item = (N::TransactionID, Data<Transaction<N>>)> {
        self.worker_transactions().chain(self.inbound_transactions())
    }
}

impl<N: Network> Consensus<N> {
    /// Returns the worker transmission IDs.
    pub fn worker_transmission_ids(&self) -> impl '_ + Iterator<Item = TransmissionID<N>> {
        self.bft.worker_transmission_ids()
    }

    /// Returns the worker transmissions.
    pub fn worker_transmissions(&self) -> impl '_ + Iterator<Item = (TransmissionID<N>, Transmission<N>)> {
        self.bft.worker_transmissions()
    }

    /// Returns the worker solutions.
    pub fn worker_solutions(&self) -> impl '_ + Iterator<Item = (SolutionID<N>, Data<Solution<N>>)> {
        self.bft.worker_solutions()
    }

    /// Returns the worker transactions.
    pub fn worker_transactions(&self) -> impl '_ + Iterator<Item = (N::TransactionID, Data<Transaction<N>>)> {
        self.bft.worker_transactions()
    }
}

impl<N: Network> Consensus<N> {
    /// Returns the transmission IDs in the inbound queues.
    pub fn inbound_transmission_ids(&self) -> impl '_ + Iterator<Item = TransmissionID<N>> {
        self.inbound_transmissions().map(|(id, _)| id)
    }

    /// Returns the transmissions in the inbound queues.
    pub fn inbound_transmissions(&self) -> impl '_ + Iterator<Item = (TransmissionID<N>, Transmission<N>)> {
        self.inbound_transactions()
            .map(|(id, tx)| {
                (
                    TransmissionID::Transaction(id, tx.to_checksum::<N>().unwrap_or_default()),
                    Transmission::Transaction(tx),
                )
            })
            .chain(self.inbound_solutions().map(|(id, solution)| {
                (
                    TransmissionID::Solution(id, solution.to_checksum::<N>().unwrap_or_default()),
                    Transmission::Solution(solution),
                )
            }))
    }

    /// Returns the solutions in the inbound queue.
    pub fn inbound_solutions(&self) -> impl '_ + Iterator<Item = (SolutionID<N>, Data<Solution<N>>)> {
        // Return a clone of the solutions queue.
        self.solutions_queue.lock().clone().into_iter().map(|(id, solution)| (id, Data::Object(solution)))
    }

    /// Returns the transactions in the inbound queue.
    pub fn inbound_transactions(&self) -> impl '_ + Iterator<Item = (N::TransactionID, Data<Transaction<N>>)> {
        // Return a clone of the transactions queue.
        self.transactions_queue.read().transactions().map(|(id, tx)| (id, Data::Object(tx)))
    }
}

impl<N: Network> Consensus<N> {
    /// Adds the given unconfirmed solution to the memory pool.
    pub async fn add_unconfirmed_solution(&self, solution: Solution<N>) -> Result<()> {
        // Compute the checksum of the solution.
        let checksum = Data::<Solution<N>>::Buffer(solution.to_bytes_le()?.into()).to_checksum::<N>()?;
        {
            let solution_id = solution.id();

            // If the solution was recently seen, return early.
            if self.seen_solutions.lock().put(solution_id, ()).is_some() {
                return Ok(());
            }
            // Check that the solution does not already exist in the ledger.
            if self.ledger.contains_transmission(&TransmissionID::Solution(solution_id, checksum))? {
                bail!("Solution '{}' exists in the ledger {}", fmt_id(solution_id), "(skipping)".dimmed());
            }
            #[cfg(feature = "metrics")]
            {
                metrics::increment_gauge(metrics::consensus::UNCONFIRMED_SOLUTIONS, 1f64);
                let timestamp = snarkos_node_bft::helpers::now();
                self.transmissions_tracker.lock().insert(TransmissionID::Solution(solution.id(), checksum), timestamp);
            }
            trace!("Received unconfirmed solution '{}' in the queue", fmt_id(solution_id));
            // Add the solution to the queue, checking that it is not already present.
            if self.solutions_queue.lock().put(solution_id, solution).is_some() {
                bail!("Solution '{}' exists in the memory pool", fmt_id(solution_id));
            }
        }

        // Attempt to process the queued solutions.
        self.process_unconfirmed_solutions().await
    }

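    /// Processes the unconfirmed solutions in the inbound queue: drains up to
    /// `N::MAX_SOLUTIONS` minus the number of already-unconfirmed solutions (in LRU order)
    /// and forwards them to the primary, unless the primary is already at its
    /// transmission tolerance.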
    async fn process_unconfirmed_solutions(&self) -> Result<()> {
        // Retrieve the current number of unconfirmed solutions and transmissions.
        let num_unconfirmed_solutions = self.num_unconfirmed_solutions();
        let num_unconfirmed_transmissions = self.num_unconfirmed_transmissions();
        // If the memory pool is already full, do not drain the queue.
        if num_unconfirmed_solutions >= N::MAX_SOLUTIONS
            || num_unconfirmed_transmissions >= Primary::<N>::MAX_TRANSMISSIONS_TOLERANCE
        {
            return Ok(());
        }
        // Drain up to the remaining capacity from the solutions queue.
        let solutions = {
            let capacity = N::MAX_SOLUTIONS.saturating_sub(num_unconfirmed_solutions);
            let mut queue = self.solutions_queue.lock();
            let num_solutions = queue.len().min(capacity);
            (0..num_solutions).filter_map(|_| queue.pop_lru().map(|(_, solution)| solution)).collect::<Vec<_>>()
        };
        // Send each solution to the primary.
        for solution in solutions.into_iter() {
            let solution_id = solution.id();
            trace!("Adding unconfirmed solution '{}' to the memory pool...", fmt_id(solution_id));
            if let Err(e) = self.primary_sender.send_unconfirmed_solution(solution_id, Data::Object(solution)).await {
                // Only warn if the node is synced and the chain is more than 10 blocks into the current epoch.
                if self.bft.is_synced() {
                    if self.ledger.latest_block_height() % N::NUM_BLOCKS_PER_EPOCH > 10 {
                        warn!("Failed to add unconfirmed solution '{}' to the memory pool - {e}", fmt_id(solution_id))
                    };
                }
            }
        }
        Ok(())
    }

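    /// Adds the given unconfirmed transaction to the memory pool, rejecting standalone fee
    /// transactions, recently-seen duplicates, and transactions that already exist in the
    /// ledger or in the memory pool.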
    pub async fn add_unconfirmed_transaction(&self, transaction: Transaction<N>) -> Result<()> {
        // Compute the checksum of the transaction.
        let checksum = Data::<Transaction<N>>::Buffer(transaction.to_bytes_le()?.into()).to_checksum::<N>()?;
        {
            let transaction_id = transaction.id();

            // Reject standalone fee transactions.
            if transaction.is_fee() {
                bail!("Transaction '{}' is a fee transaction {}", fmt_id(transaction_id), "(skipping)".dimmed());
            }
            // If the transaction was recently seen, return early.
            if self.seen_transactions.lock().put(transaction_id, ()).is_some() {
                return Ok(());
            }
            // Check that the transaction does not already exist in the ledger.
            if self.ledger.contains_transmission(&TransmissionID::Transaction(transaction_id, checksum))? {
                bail!("Transaction '{}' exists in the ledger {}", fmt_id(transaction_id), "(skipping)".dimmed());
            }
            #[cfg(feature = "metrics")]
            {
                metrics::increment_gauge(metrics::consensus::UNCONFIRMED_TRANSACTIONS, 1f64);
                let timestamp = snarkos_node_bft::helpers::now();
                self.transmissions_tracker
                    .lock()
                    .insert(TransmissionID::Transaction(transaction.id(), checksum), timestamp);
            }
            // Check that the transaction is not already queued.
            if self.contains_transaction(&transaction_id) {
                bail!("Transaction '{}' exists in the memory pool", fmt_id(transaction_id));
            }
            trace!("Received unconfirmed transaction '{}' in the queue", fmt_id(transaction_id));
            // Insert the transaction into the queue along with its priority fee.
            let priority_fee = transaction.priority_fee_amount()?;
            self.transactions_queue.write().insert(transaction_id, transaction, priority_fee)?;
        }

        // Attempt to process the queued transactions.
        self.process_unconfirmed_transactions().await
    }

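    /// Processes the unconfirmed transactions in the inbound queue: drains a batch bounded by
    /// the primary's remaining transmission capacity, interleaving at most
    /// `MAX_DEPLOYMENTS_PER_INTERVAL` deployments with executions, and forwards each
    /// transaction to the primary.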
    async fn process_unconfirmed_transactions(&self) -> Result<()> {
        // If the memory pool is already at the transmission tolerance, do not drain the queue.
        let num_unconfirmed_transmissions = self.num_unconfirmed_transmissions();
        if num_unconfirmed_transmissions >= Primary::<N>::MAX_TRANSMISSIONS_TOLERANCE {
            return Ok(());
        }
        // Drain a batch of transactions from the queue.
        let transactions = {
            // Determine the remaining capacity.
            let capacity = Primary::<N>::MAX_TRANSMISSIONS_TOLERANCE.saturating_sub(num_unconfirmed_transmissions);
            // Acquire the lock on the transactions queue.
            let mut tx_queue = self.transactions_queue.write();
            // Determine how many deployments and executions to dequeue.
            let num_deployments = tx_queue.deployments.len().min(capacity).min(MAX_DEPLOYMENTS_PER_INTERVAL);
            let num_executions = tx_queue.executions.len().min(capacity.saturating_sub(num_deployments));
            // Interleave deployments and executions, popping each from its respective queue.
            let selector_iter = (0..num_deployments).map(|_| true).interleave((0..num_executions).map(|_| false));
            selector_iter
                .filter_map(
                    |select_deployment| {
                        if select_deployment { tx_queue.deployments.pop() } else { tx_queue.executions.pop() }
                    },
                )
                .map(|(_, tx)| tx)
                .collect_vec()
        };
        // Send each transaction to the primary.
        for transaction in transactions.into_iter() {
            let transaction_id = transaction.id();
            let tx_type_str = match transaction {
                Transaction::Deploy(..) => "deployment",
                Transaction::Execute(..) => "execution",
                Transaction::Fee(..) => "fee",
            };
            trace!("Adding unconfirmed {tx_type_str} transaction '{}' to the memory pool...", fmt_id(transaction_id));
            if let Err(e) =
                self.primary_sender.send_unconfirmed_transaction(transaction_id, Data::Object(transaction)).await
            {
                // Only warn if the node is synced.
                if self.bft.is_synced() {
                    warn!(
                        "Failed to add unconfirmed {tx_type_str} transaction '{}' to the memory pool - {e}",
                        fmt_id(transaction_id)
                    );
                }
            }
        }
        Ok(())
    }
}

impl<N: Network> Consensus<N> {
    /// Starts the consensus handlers.
    fn start_handlers(&self, consensus_receiver: ConsensusReceiver<N>) {
        let ConsensusReceiver { mut rx_consensus_subdag } = consensus_receiver;

        // Process the committed subdags from the BFT.
        let self_ = self.clone();
        self.spawn(async move {
            while let Some((committed_subdag, transmissions, callback)) = rx_consensus_subdag.recv().await {
                self_.process_bft_subdag(committed_subdag, transmissions, callback).await;
            }
        });

        // Periodically process the unconfirmed transactions and solutions in the queues.
        let self_ = self.clone();
        self.spawn(async move {
            loop {
                // Sleep for the batch delay before processing the queues.
                tokio::time::sleep(Duration::from_millis(MAX_BATCH_DELAY_IN_MS)).await;
                if let Err(e) = self_.process_unconfirmed_transactions().await {
                    warn!("Cannot process unconfirmed transactions - {e}");
                }
                if let Err(e) = self_.process_unconfirmed_solutions().await {
                    warn!("Cannot process unconfirmed solutions - {e}");
                }
            }
        });
    }

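    /// Processes a committed subdag from the BFT: attempts to advance the ledger to the next
    /// block on a blocking task, reinserts the transmissions into the memory pool if that
    /// fails, and reports the result back through the callback.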
    async fn process_bft_subdag(
        &self,
        subdag: Subdag<N>,
        transmissions: IndexMap<TransmissionID<N>, Transmission<N>>,
        callback: oneshot::Sender<Result<()>>,
    ) {
        // Try to advance to the next block on a blocking task.
        let self_ = self.clone();
        let transmissions_ = transmissions.clone();
        let result = spawn_blocking! { self_.try_advance_to_next_block(subdag, transmissions_) };

        // If advancing fails, reinsert the transmissions into the memory pool.
        if let Err(e) = &result {
            error!("Unable to advance to the next block - {e}");
            self.reinsert_transmissions(transmissions).await;
        }
        // Send the result back to the BFT.
        callback.send(result).ok();
    }

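    /// Attempts to advance the ledger to the next block, constructed from the given subdag and
    /// transmissions. On an epoch boundary the solution queues are cleared, and after every
    /// block the block locators and sync height are refreshed.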
    fn try_advance_to_next_block(
        &self,
        subdag: Subdag<N>,
        transmissions: IndexMap<TransmissionID<N>, Transmission<N>>,
    ) -> Result<()> {
        #[cfg(feature = "metrics")]
        let start = subdag.leader_certificate().batch_header().timestamp();
        #[cfg(feature = "metrics")]
        let num_committed_certificates = subdag.values().map(|c| c.len()).sum::<usize>();
        #[cfg(feature = "metrics")]
        let current_block_timestamp = self.ledger.latest_block().header().metadata().timestamp();

        // Prepare, check, and advance to the next quorum block.
        let next_block = self.ledger.prepare_advance_to_next_quorum_block(subdag, transmissions)?;
        self.ledger.check_next_block(&next_block)?;
        self.ledger.advance_to_next_block(&next_block)?;
        // Retrieve the current committee for the telemetry metrics below.
        #[cfg(feature = "telemetry")]
        let latest_committee = self.ledger.current_committee()?;

        // If the next block starts a new epoch, clear the queued and worker solutions.
        if next_block.height() % N::NUM_BLOCKS_PER_EPOCH == 0 {
            self.solutions_queue.lock().clear();
            self.bft.primary().clear_worker_solutions();
        }

        // Update the block locators for the ping handle.
        let locators = self.block_sync.get_block_locators()?;
        self.ping.update_block_locators(locators);

        // Update the sync height to the new block height.
        self.block_sync.set_sync_height(next_block.height());

        #[cfg(feature = "metrics")]
        {
            let elapsed = std::time::Duration::from_secs((snarkos_node_bft::helpers::now() - start) as u64);
            let next_block_timestamp = next_block.header().metadata().timestamp();
            let block_latency = next_block_timestamp - current_block_timestamp;
            let proof_target = next_block.header().proof_target();
            let coinbase_target = next_block.header().coinbase_target();
            let cumulative_proof_target = next_block.header().cumulative_proof_target();

            metrics::add_transmission_latency_metric(&self.transmissions_tracker, &next_block);

            metrics::gauge(metrics::consensus::COMMITTED_CERTIFICATES, num_committed_certificates as f64);
            metrics::histogram(metrics::consensus::CERTIFICATE_COMMIT_LATENCY, elapsed.as_secs_f64());
            metrics::histogram(metrics::consensus::BLOCK_LATENCY, block_latency as f64);
            metrics::gauge(metrics::blocks::PROOF_TARGET, proof_target as f64);
            metrics::gauge(metrics::blocks::COINBASE_TARGET, coinbase_target as f64);
            metrics::gauge(metrics::blocks::CUMULATIVE_PROOF_TARGET, cumulative_proof_target as f64);

            #[cfg(feature = "telemetry")]
            {
                // Record the participation score of each validator in the latest committee.
                let participation_scores =
                    self.bft().primary().gateway().validator_telemetry().get_participation_scores(&latest_committee);

                for (address, participation_score) in participation_scores {
                    metrics::histogram_label(
                        metrics::consensus::VALIDATOR_PARTICIPATION,
                        "validator_address",
                        address.to_string(),
                        participation_score,
                    )
                }
            }
        }
        Ok(())
    }

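    /// Reinserts the given transmissions into the memory pool, logging any that fail to be
    /// reinserted.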
    async fn reinsert_transmissions(&self, transmissions: IndexMap<TransmissionID<N>, Transmission<N>>) {
        // Iterate over the transmissions and reinsert them one by one.
        for (transmission_id, transmission) in transmissions.into_iter() {
            if let Err(e) = self.reinsert_transmission(transmission_id, transmission).await {
                warn!(
                    "Unable to reinsert transmission {}.{} into the memory pool - {e}",
                    fmt_id(transmission_id),
                    fmt_id(transmission_id.checksum().unwrap_or_default()).dimmed()
                );
            }
        }
    }

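    /// Reinserts the given transmission into the memory pool by resending it through the
    /// primary's unconfirmed-solution or unconfirmed-transaction channel, then awaits the
    /// callback for the result.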
    async fn reinsert_transmission(
        &self,
        transmission_id: TransmissionID<N>,
        transmission: Transmission<N>,
    ) -> Result<()> {
        // Initialize a callback channel.
        let (callback, callback_receiver) = oneshot::channel();
        // Dispatch the transmission to the appropriate primary channel.
        match (transmission_id, transmission) {
            (TransmissionID::Ratification, Transmission::Ratification) => return Ok(()),
            (TransmissionID::Solution(solution_id, _), Transmission::Solution(solution)) => {
                self.primary_sender.tx_unconfirmed_solution.send((solution_id, solution, callback)).await?;
            }
            (TransmissionID::Transaction(transaction_id, _), Transmission::Transaction(transaction)) => {
                self.primary_sender.tx_unconfirmed_transaction.send((transaction_id, transaction, callback)).await?;
            }
            _ => bail!("Mismatching `(transmission_id, transmission)` pair in consensus"),
        }
        // Await the callback for the result.
        callback_receiver.await?
    }

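    /// Spawns a task with the given future and stores its handle for shutdown.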
    fn spawn<T: Future<Output = ()> + Send + 'static>(&self, future: T) {
        self.handles.lock().push(tokio::spawn(future));
    }

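    /// Shuts down the BFT and aborts the spawned background tasks.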
    pub async fn shut_down(&self) {
        info!("Shutting down consensus...");
        // Shut down the BFT.
        self.bft.shut_down().await;
        // Abort the spawned tasks.
        self.handles.lock().iter().for_each(|handle| handle.abort());
    }
}