1use crate::block_assembler::{self, BlockAssembler};
4use crate::callback::{Callbacks, PendingCallback, ProposedCallback, RejectCallback};
5use crate::component::orphan::OrphanPool;
6use crate::component::pool_map::{PoolEntry, Status};
7use crate::component::verify_queue::VerifyQueue;
8use crate::error::{handle_recv_error, handle_send_cmd_error, handle_try_send_error};
9use crate::pool::TxPool;
10use crate::verify_mgr::VerifyMgr;
11use ckb_app_config::{BlockAssemblerConfig, TxPoolConfig};
12use ckb_async_runtime::Handle;
13use ckb_chain_spec::consensus::Consensus;
14use ckb_channel::oneshot;
15use ckb_error::AnyError;
16use ckb_fee_estimator::FeeEstimator;
17use ckb_jsonrpc_types::BlockTemplate;
18use ckb_logger::error;
19use ckb_logger::info;
20use ckb_network::{NetworkController, PeerIndex};
21use ckb_script::ChunkCommand;
22use ckb_snapshot::Snapshot;
23use ckb_stop_handler::new_tokio_exit_rx;
24use ckb_store::ChainStore;
25use ckb_types::{
26 core::{
27 BlockView, Cycle, EstimateMode, FeeRate, TransactionView, UncleBlockView, Version,
28 cell::{CellProvider, CellStatus, OverlayCellProvider},
29 tx_pool::{
30 EntryCompleted, PoolTxDetailInfo, Reject, TRANSACTION_SIZE_LIMIT,
31 TransactionWithStatus, TxPoolEntryInfo, TxPoolIds, TxPoolInfo, TxStatus,
32 },
33 },
34 packed::{Byte32, OutPoint, ProposalShortId},
35};
36use ckb_util::LinkedHashSet;
37use ckb_verification::cache::TxVerificationCache;
38use std::collections::{HashMap, HashSet, VecDeque};
39use std::sync::{
40 Arc,
41 atomic::{AtomicBool, Ordering},
42};
43use std::time::Duration;
44use tokio::sync::watch;
45use tokio::sync::{RwLock, mpsc};
46use tokio::task::block_in_place;
47use tokio_util::sync::CancellationToken;
48
49use crate::pool_cell::PoolCell;
50#[cfg(feature = "internal")]
51use crate::{component::entry::TxEntry, process::PlugTarget};
52
/// Buffer size of the main tx-pool service channels (requests and reorg notifications).
pub(crate) const DEFAULT_CHANNEL_SIZE: usize = 512;
/// Buffer size of the channel carrying `BlockAssemblerMessage`s.
pub(crate) const BLOCK_ASSEMBLER_CHANNEL_SIZE: usize = 100;
55
/// A request carrying arguments `A` plus a one-shot channel on which the
/// service sends back a response of type `R`. Used by the synchronous
/// (`send_message!`-based) controller APIs.
pub(crate) struct Request<A, R> {
    /// Sending half of the response channel.
    pub responder: oneshot::Sender<R>,
    /// Arguments for the requested operation.
    pub arguments: A,
}
60
61impl<A, R> Request<A, R> {
62 pub(crate) fn call(arguments: A, responder: oneshot::Sender<R>) -> Request<A, R> {
63 Request {
64 responder,
65 arguments,
66 }
67 }
68}
69
/// Like [`Request`], but the responder is a tokio one-shot sender so the
/// caller can `await` the response instead of blocking. Used by the async
/// controller APIs (`fetch_txs`, `fresh_proposals_filter`, ...).
pub(crate) struct AsyncRequest<A, R> {
    /// Sending half of the async response channel.
    pub responder: tokio::sync::oneshot::Sender<R>,
    /// Arguments for the requested operation.
    pub arguments: A,
}
74
75impl<A, R> AsyncRequest<A, R> {
76 pub(crate) fn call(
77 arguments: A,
78 responder: tokio::sync::oneshot::Sender<R>,
79 ) -> AsyncRequest<A, R> {
80 AsyncRequest {
81 responder,
82 arguments,
83 }
84 }
85}
86
/// A fire-and-forget message carrying arguments `A`; unlike [`Request`]
/// there is no response channel.
pub(crate) struct Notify<A> {
    /// Payload of the notification.
    pub arguments: A,
}
90
91impl<A> Notify<A> {
92 pub(crate) fn new(arguments: A) -> Notify<A> {
93 Notify { arguments }
94 }
95}
96
/// Response type of a block-template request.
pub(crate) type BlockTemplateResult = Result<BlockTemplate, AnyError>;
/// `(bytes_limit, proposals_limit, max_version)` arguments of a block-template request.
type BlockTemplateArgs = (Option<u64>, Option<u64>, Option<Version>);

/// Response type of a local tx submission.
pub(crate) type SubmitTxResult = Result<(), Reject>;

/// Response type of a dry-run (`test_accept_tx`) submission.
pub(crate) type TestAcceptTxResult = Result<EntryCompleted, Reject>;

/// Tx status plus cycles (when known) for a status query.
type GetTxStatusResult = Result<(TxStatus, Option<Cycle>), AnyError>;
/// Full transaction-with-status result for a query by hash.
type GetTransactionWithStatusResult = Result<TransactionWithStatus, AnyError>;
/// Transactions with their verified cycles, keyed by proposal short id.
type FetchTxsWithCyclesResult = Vec<(ProposalShortId, (TransactionView, Cycle))>;

/// Arguments of a chain-reorg notification:
/// `(detached_blocks, attached_blocks, detached_proposal_ids, new_snapshot)`.
pub(crate) type ChainReorgArgs = (
    VecDeque<BlockView>,
    VecDeque<BlockView>,
    HashSet<ProposalShortId>,
    Arc<Snapshot>,
);

/// Response type of a fee-rate estimation request.
pub(crate) type FeeEstimatesResult = Result<FeeRate, AnyError>;
116
/// Messages handled by the tx-pool service loop (see [`process`]).
pub(crate) enum Message {
    /// Build a block template with the given limits.
    BlockTemplate(Request<BlockTemplateArgs, BlockTemplateResult>),
    /// Submit a locally generated transaction.
    SubmitLocalTx(Request<TransactionView, SubmitTxResult>),
    /// Remove a local transaction by hash; responds whether it was removed.
    RemoveLocalTx(Request<Byte32, bool>),
    /// Dry-run acceptance check for a transaction.
    TestAcceptTx(Request<TransactionView, TestAcceptTxResult>),
    /// Submit a transaction relayed from a peer along with its declared cycles.
    SubmitRemoteTx(Request<(TransactionView, Cycle, PeerIndex), ()>),
    /// Fire-and-forget submission of a batch of transactions.
    NotifyTxs(Notify<Vec<TransactionView>>),
    /// Filter out proposal ids already known to the pool.
    FreshProposalsFilter(AsyncRequest<Vec<ProposalShortId>, Vec<ProposalShortId>>),
    /// Fetch transactions (pool, store or orphan) by short id.
    FetchTxs(AsyncRequest<HashSet<ProposalShortId>, HashMap<ProposalShortId, TransactionView>>),
    /// Fetch transactions together with their verified cycles.
    FetchTxsWithCycles(AsyncRequest<HashSet<ProposalShortId>, FetchTxsWithCyclesResult>),
    /// Query aggregate pool information.
    GetTxPoolInfo(Request<(), TxPoolInfo>),
    /// Resolve a live cell, optionally eagerly loading its data.
    GetLiveCell(Request<(OutPoint, bool), CellStatus>),
    /// Query the status of a transaction by hash.
    GetTxStatus(Request<Byte32, GetTxStatusResult>),
    /// Query a transaction together with its status by hash.
    GetTransactionWithStatus(Request<Byte32, GetTransactionWithStatusResult>),
    /// A new candidate uncle block was observed.
    NewUncle(Notify<UncleBlockView>),
    /// Clear the pool against a fresh snapshot.
    ClearPool(Request<Arc<Snapshot>, ()>),
    /// Clear the verification queue.
    ClearVerifyQueue(Request<(), ()>),
    /// Dump entry info for every pool entry.
    GetAllEntryInfo(Request<(), TxPoolEntryInfo>),
    /// Dump the ids of every pool entry.
    GetAllIds(Request<(), TxPoolIds>),
    /// Persist the pool to disk.
    SavePool(Request<(), ()>),
    /// Query detailed pool information for one transaction.
    GetPoolTxDetails(Request<Byte32, PoolTxDetailInfo>),
    /// Estimated number of recently rejected transactions, if tracking is enabled.
    GetTotalRecentRejectNum(Request<(), Option<u64>>),

    /// Toggle initial-block-download mode.
    UpdateIBDState(Request<bool, ()>),
    /// Estimate a fee rate for the given mode.
    EstimateFeeRate(Request<(EstimateMode, bool), FeeEstimatesResult>),

    /// Directly plug entries into the pool (test/internal only).
    #[cfg(feature = "internal")]
    PlugEntry(Request<(Vec<TxEntry>, PlugTarget), ()>),
    /// Package txs up to a byte limit (test/internal only).
    #[cfg(feature = "internal")]
    PackageTxs(Request<Option<u64>, Vec<TxEntry>>),
    /// Submit a local transaction through the resumable path (test helper).
    SubmitLocalTestTx(Request<TransactionView, SubmitTxResult>),
}
150
/// Events forwarded to the block-assembler update loop.
#[derive(Debug, Hash, Eq, PartialEq)]
pub(crate) enum BlockAssemblerMessage {
    /// Pending set changed.
    Pending,
    /// Proposed set changed.
    Proposed,
    /// A new candidate uncle arrived.
    Uncle,
    /// Rebuild the template against a fresh snapshot (clears queued updates).
    Reset(Arc<Snapshot>),
}
158
/// Cheap-to-clone handle used by other subsystems to talk to the tx-pool
/// service over channels.
#[derive(Clone)]
pub struct TxPoolController {
    // Channel for regular service requests/notifications.
    sender: mpsc::Sender<Message>,
    // Dedicated channel for chain-reorg notifications.
    reorg_sender: mpsc::Sender<Notify<ChainReorgArgs>>,
    // Watch channel used to suspend/resume chunked script verification.
    chunk_tx: Arc<watch::Sender<ChunkCommand>>,
    // Async runtime handle.
    handle: Handle,
    // Set to true once `TxPoolServiceBuilder::start` has spawned the loops.
    started: Arc<AtomicBool>,
}
170
/// Sends `Message::$msg_type` with `$args` over the controller's channel and
/// synchronously waits for the response.
///
/// Uses `try_send`, so it fails fast if the service channel is full, and
/// `block_in_place` so the blocking `recv` does not starve the tokio worker.
macro_rules! send_message {
    ($self:ident, $msg_type:ident, $args:expr) => {{
        let (responder, response) = oneshot::channel();
        let request = Request::call($args, responder);
        $self
            .sender
            .try_send(Message::$msg_type(request))
            .map_err(|e| {
                let (_m, e) = handle_try_send_error(e);
                e
            })?;
        block_in_place(|| response.recv())
            .map_err(handle_recv_error)
            .map_err(Into::into)
    }};
}
187
/// Sends `Message::$msg_type` as a fire-and-forget notification; no response
/// is awaited. Fails fast (via `try_send`) if the channel is full.
macro_rules! send_notify {
    ($self:ident, $msg_type:ident, $args:expr) => {{
        let notify = Notify::new($args);
        $self
            .sender
            .try_send(Message::$msg_type(notify))
            .map_err(|e| {
                let (_m, e) = handle_try_send_error(e);
                e.into()
            })
    }};
}
200
impl TxPoolController {
    /// Returns whether the tx-pool service loops have been started.
    pub fn service_started(&self) -> bool {
        self.started.load(Ordering::Acquire)
    }

    /// Overrides the started flag (test/internal use only).
    #[cfg(feature = "internal")]
    pub fn set_service_started(&self, v: bool) {
        self.started.store(v, Ordering::Release);
    }

    /// Returns the async runtime handle.
    pub fn handle(&self) -> &Handle {
        &self.handle
    }

    /// Requests a block template, optionally bounding bytes, proposal count
    /// and block version. Blocks until the service responds.
    pub fn get_block_template(
        &self,
        bytes_limit: Option<u64>,
        proposals_limit: Option<u64>,
        max_version: Option<Version>,
    ) -> Result<BlockTemplateResult, AnyError> {
        send_message!(
            self,
            BlockTemplate,
            (bytes_limit, proposals_limit, max_version)
        )
    }

    /// Notifies the service of a new candidate uncle block.
    pub fn notify_new_uncle(&self, uncle: UncleBlockView) -> Result<(), AnyError> {
        send_notify!(self, NewUncle, uncle)
    }

    /// Notifies the service that the chain reorganized, shipping detached and
    /// attached blocks, detached proposal ids and the new snapshot over the
    /// dedicated reorg channel.
    pub fn update_tx_pool_for_reorg(
        &self,
        detached_blocks: VecDeque<BlockView>,
        attached_blocks: VecDeque<BlockView>,
        detached_proposal_id: HashSet<ProposalShortId>,
        snapshot: Arc<Snapshot>,
    ) -> Result<(), AnyError> {
        let notify = Notify::new((
            detached_blocks,
            attached_blocks,
            detached_proposal_id,
            snapshot,
        ));
        self.reorg_sender.try_send(notify).map_err(|e| {
            let (_m, e) = handle_try_send_error(e);
            e.into()
        })
    }

    /// Submits a locally generated transaction and waits for the verdict.
    pub fn submit_local_tx(&self, tx: TransactionView) -> Result<SubmitTxResult, AnyError> {
        send_message!(self, SubmitLocalTx, tx)
    }

    /// Dry-run acceptance check: verifies the transaction without adding it
    /// to the pool.
    pub fn test_accept_tx(&self, tx: TransactionView) -> Result<TestAcceptTxResult, AnyError> {
        send_message!(self, TestAcceptTx, tx)
    }

    /// Removes a local transaction by hash; returns whether it was removed.
    pub fn remove_local_tx(&self, tx_hash: Byte32) -> Result<bool, AnyError> {
        send_message!(self, RemoveLocalTx, tx_hash)
    }

    /// Submits a peer-relayed transaction with its declared cycles.
    /// NOTE(review): despite being `async`, this uses the blocking
    /// `send_message!` path internally.
    pub async fn submit_remote_tx(
        &self,
        tx: TransactionView,
        declared_cycles: Cycle,
        peer: PeerIndex,
    ) -> Result<(), AnyError> {
        send_message!(self, SubmitRemoteTx, (tx, declared_cycles, peer))
    }

    /// Fire-and-forget submission of a batch of transactions.
    pub fn notify_txs(&self, txs: Vec<TransactionView>) -> Result<(), AnyError> {
        send_notify!(self, NotifyTxs, txs)
    }

    /// Async variant of [`Self::notify_txs`]; awaits channel capacity instead
    /// of failing when the channel is full.
    pub async fn notify_txs_async(&self, txs: Vec<TransactionView>) -> Result<(), AnyError> {
        let notify = Notify::new(txs);
        self.sender
            .send(Message::NotifyTxs(notify))
            .await
            .map_err(|e| {
                let e = ckb_error::OtherError::new(format!("SendError {e}"));
                e.into()
            })
    }

    /// Queries aggregate tx-pool information.
    pub fn get_tx_pool_info(&self) -> Result<TxPoolInfo, AnyError> {
        send_message!(self, GetTxPoolInfo, ())
    }

    /// Resolves a live cell; when `with_data` is set, the cell data is loaded
    /// eagerly.
    pub fn get_live_cell(
        &self,
        out_point: OutPoint,
        with_data: bool,
    ) -> Result<CellStatus, AnyError> {
        send_message!(self, GetLiveCell, (out_point, with_data))
    }

    /// Returns the subset of `proposals` not yet known to the pool.
    pub async fn fresh_proposals_filter(
        &self,
        proposals: Vec<ProposalShortId>,
    ) -> Result<Vec<ProposalShortId>, AnyError> {
        let (responder, response) = tokio::sync::oneshot::channel();
        let request = AsyncRequest::call(proposals, responder);
        self.sender
            .send(Message::FreshProposalsFilter(request))
            .await?;
        response.await.map_err(Into::into)
    }

    /// Queries the status (and cycles, when known) of a transaction by hash.
    pub fn get_tx_status(&self, hash: Byte32) -> Result<GetTxStatusResult, AnyError> {
        send_message!(self, GetTxStatus, hash)
    }

    /// Queries a transaction together with its status by hash.
    pub fn get_transaction_with_status(
        &self,
        hash: Byte32,
    ) -> Result<GetTransactionWithStatusResult, AnyError> {
        send_message!(self, GetTransactionWithStatus, hash)
    }

    /// Fetches the transactions matching `short_ids` from the pool, the store
    /// or the orphan pool.
    pub async fn fetch_txs(
        &self,
        short_ids: HashSet<ProposalShortId>,
    ) -> Result<HashMap<ProposalShortId, TransactionView>, AnyError> {
        let (responder, response) = tokio::sync::oneshot::channel();
        let request = AsyncRequest::call(short_ids, responder);
        self.sender.send(Message::FetchTxs(request)).await?;
        response.await.map_err(Into::into)
    }

    /// Fetches transactions together with their verified cycles.
    pub async fn fetch_txs_with_cycles(
        &self,
        short_ids: HashSet<ProposalShortId>,
    ) -> Result<FetchTxsWithCyclesResult, AnyError> {
        let (responder, response) = tokio::sync::oneshot::channel();
        let request = AsyncRequest::call(short_ids, responder);
        self.sender
            .send(Message::FetchTxsWithCycles(request))
            .await?;
        response.await.map_err(Into::into)
    }

    /// Clears the pool against a fresh snapshot.
    pub fn clear_pool(&self, new_snapshot: Arc<Snapshot>) -> Result<(), AnyError> {
        send_message!(self, ClearPool, new_snapshot)
    }

    /// Clears the verification queue.
    pub fn clear_verify_queue(&self) -> Result<(), AnyError> {
        send_message!(self, ClearVerifyQueue, ())
    }

    /// Dumps entry information for every pool entry.
    pub fn get_all_entry_info(&self) -> Result<TxPoolEntryInfo, AnyError> {
        send_message!(self, GetAllEntryInfo, ())
    }

    /// Dumps the ids of every pool entry.
    pub fn get_all_ids(&self) -> Result<TxPoolIds, AnyError> {
        send_message!(self, GetAllIds, ())
    }

    /// Queries detailed pool information for one transaction by hash.
    pub fn get_tx_detail(&self, tx_hash: Byte32) -> Result<PoolTxDetailInfo, AnyError> {
        send_message!(self, GetPoolTxDetails, tx_hash)
    }

    /// Asks the service to persist the pool to disk and waits for completion.
    pub fn save_pool(&self) -> Result<(), AnyError> {
        info!("Please be patient, tx-pool are saving data into disk ...");
        send_message!(self, SavePool, ())
    }

    /// Toggles initial-block-download mode.
    pub fn update_ibd_state(&self, in_ibd: bool) -> Result<(), AnyError> {
        send_message!(self, UpdateIBDState, in_ibd)
    }

    /// Estimates a fee rate for the given mode; `enable_fallback` allows a
    /// fallback estimate when the primary estimator cannot answer.
    pub fn estimate_fee_rate(
        &self,
        estimate_mode: EstimateMode,
        enable_fallback: bool,
    ) -> Result<FeeEstimatesResult, AnyError> {
        send_message!(self, EstimateFeeRate, (estimate_mode, enable_fallback))
    }

    /// Suspends chunked script verification.
    pub fn suspend_chunk_process(&self) -> Result<(), AnyError> {
        self.chunk_tx
            .send(ChunkCommand::Suspend)
            .map_err(handle_send_cmd_error)
            .map_err(Into::into)
    }

    /// Resumes chunked script verification.
    pub fn continue_chunk_process(&self) -> Result<(), AnyError> {
        self.chunk_tx
            .send(ChunkCommand::Resume)
            .map_err(handle_send_cmd_error)
            .map_err(Into::into)
    }

    /// Re-submits transactions loaded from the persistent pool file; stale
    /// (rejected) transactions are counted and logged but not treated as
    /// errors.
    fn load_persisted_data(&self, txs: Vec<TransactionView>) -> Result<(), AnyError> {
        if !txs.is_empty() {
            info!("Loading persistent tx-pool data, total {} txs", txs.len());
            let mut failed_txs = 0;
            for tx in txs {
                if self.submit_local_tx(tx)?.is_err() {
                    failed_txs += 1;
                }
            }
            if failed_txs == 0 {
                info!("Persistent tx-pool data is loaded");
            } else {
                info!(
                    "Persistent tx-pool data is loaded, {} stale txs are ignored",
                    failed_txs
                );
            }
        }
        Ok(())
    }

    /// Directly plugs entries into the pool (test/internal use only).
    #[cfg(feature = "internal")]
    pub fn plug_entry(&self, entries: Vec<TxEntry>, target: PlugTarget) -> Result<(), AnyError> {
        send_message!(self, PlugEntry, (entries, target))
    }

    /// Packages txs up to `bytes_limit` (test/internal use only).
    #[cfg(feature = "internal")]
    pub fn package_txs(&self, bytes_limit: Option<u64>) -> Result<Vec<TxEntry>, AnyError> {
        send_message!(self, PackageTxs, bytes_limit)
    }

    /// Submits a local transaction through the resumable verification path
    /// (test helper).
    pub fn submit_local_test_tx(&self, tx: TransactionView) -> Result<SubmitTxResult, AnyError> {
        send_message!(self, SubmitLocalTestTx, tx)
    }

    /// Returns the estimated number of recently rejected transactions, if the
    /// recent-reject database is enabled.
    pub fn get_total_recent_reject_num(&self) -> Result<Option<u64>, AnyError> {
        send_message!(self, GetTotalRecentRejectNum, ())
    }
}
477
/// Builder that owns everything needed to construct and spawn the tx-pool
/// service loops; consumed by [`TxPoolServiceBuilder::start`].
pub struct TxPoolServiceBuilder {
    pub(crate) tx_pool_config: TxPoolConfig,
    // Controller paired with this builder; used to load persisted txs at start.
    pub(crate) tx_pool_controller: TxPoolController,
    pub(crate) snapshot: Arc<Snapshot>,
    // Present only when mining (block-assembler config was supplied).
    pub(crate) block_assembler: Option<BlockAssembler>,
    pub(crate) txs_verify_cache: Arc<RwLock<TxVerificationCache>>,
    // Callbacks invoked on pool state transitions.
    pub(crate) callbacks: Callbacks,
    // Receiving halves of the controller's channels.
    pub(crate) receiver: mpsc::Receiver<Message>,
    pub(crate) reorg_receiver: mpsc::Receiver<Notify<ChainReorgArgs>>,
    // Process-wide exit signal.
    pub(crate) signal_receiver: CancellationToken,
    pub(crate) handle: Handle,
    pub(crate) tx_relay_sender: ckb_channel::Sender<TxVerificationResult>,
    // Receives suspend/resume commands for chunked verification.
    pub(crate) chunk_rx: watch::Receiver<ChunkCommand>,
    pub(crate) started: Arc<AtomicBool>,
    pub(crate) block_assembler_channel: (
        mpsc::Sender<BlockAssemblerMessage>,
        mpsc::Receiver<BlockAssemblerMessage>,
    ),
    pub(crate) fee_estimator: FeeEstimator,
}
499
impl TxPoolServiceBuilder {
    /// Creates a builder and its paired [`TxPoolController`], wiring up all
    /// channels between them. The service loops are not spawned until
    /// [`Self::start`] is called.
    pub fn new(
        tx_pool_config: TxPoolConfig,
        snapshot: Arc<Snapshot>,
        block_assembler_config: Option<BlockAssemblerConfig>,
        txs_verify_cache: Arc<RwLock<TxVerificationCache>>,
        handle: &Handle,
        tx_relay_sender: ckb_channel::Sender<TxVerificationResult>,
        fee_estimator: FeeEstimator,
    ) -> (TxPoolServiceBuilder, TxPoolController) {
        // Channels shared between the controller (senders) and the service
        // loops (receivers).
        let (sender, receiver) = mpsc::channel(DEFAULT_CHANNEL_SIZE);
        let block_assembler_channel = mpsc::channel(BLOCK_ASSEMBLER_CHANNEL_SIZE);
        let (reorg_sender, reorg_receiver) = mpsc::channel(DEFAULT_CHANNEL_SIZE);
        let signal_receiver: CancellationToken = new_tokio_exit_rx();
        let (chunk_tx, chunk_rx) = watch::channel(ChunkCommand::Resume);
        let started = Arc::new(AtomicBool::new(false));

        let controller = TxPoolController {
            sender,
            reorg_sender,
            handle: handle.clone(),
            chunk_tx: Arc::new(chunk_tx),
            started: Arc::clone(&started),
        };

        // Only build a block assembler when mining is configured.
        let block_assembler =
            block_assembler_config.map(|config| BlockAssembler::new(config, Arc::clone(&snapshot)));
        let builder = TxPoolServiceBuilder {
            tx_pool_config,
            tx_pool_controller: controller.clone(),
            snapshot,
            block_assembler,
            txs_verify_cache,
            callbacks: Callbacks::new(),
            receiver,
            reorg_receiver,
            signal_receiver,
            handle: handle.clone(),
            tx_relay_sender,
            chunk_rx,
            started,
            block_assembler_channel,
            fee_estimator,
        };

        (builder, controller)
    }

    /// Registers a callback invoked when a tx enters the pending set.
    pub fn register_pending(&mut self, callback: PendingCallback) {
        self.callbacks.register_pending(callback);
    }

    /// Returns a clone of the tx verification-result relay sender.
    pub fn tx_relay_sender(&self) -> ckb_channel::Sender<TxVerificationResult> {
        self.tx_relay_sender.clone()
    }

    /// Registers a callback invoked when a tx enters the proposed set.
    pub fn register_proposed(&mut self, callback: ProposedCallback) {
        self.callbacks.register_proposed(callback);
    }

    /// Registers a callback invoked when a tx is rejected.
    pub fn register_reject(&mut self, callback: RejectCallback) {
        self.callbacks.register_reject(callback);
    }

    /// Consumes the builder and spawns the service loops:
    /// the verify manager, the main message loop, the block-assembler update
    /// loop (when mining) and the reorg loop. Finally marks the service as
    /// started and re-imports persisted transactions.
    pub fn start(self, network: NetworkController) {
        let consensus = self.snapshot.cloned_consensus();

        let verify_queue = Arc::new(RwLock::new(VerifyQueue::new(
            self.tx_pool_config.max_tx_verify_cycles,
        )));

        // Load previously persisted txs; a corrupt/missing file is logged and
        // treated as an empty pool.
        let tx_pool = TxPool::new(self.tx_pool_config, self.snapshot);
        let txs = match tx_pool.load_from_file() {
            Ok(txs) => txs,
            Err(e) => {
                error!("{}", e.to_string());
                error!("Failed to load txs from tx-pool persistent data file, all txs are ignored");
                Vec::new()
            }
        };

        let (block_assembler_sender, mut block_assembler_receiver) = self.block_assembler_channel;
        let service = TxPoolService {
            tx_pool_config: Arc::new(tx_pool.config.clone()),
            tx_pool: Arc::new(RwLock::new(tx_pool)),
            orphan: Arc::new(RwLock::new(OrphanPool::new())),
            block_assembler: self.block_assembler,
            txs_verify_cache: self.txs_verify_cache,
            callbacks: Arc::new(self.callbacks),
            tx_relay_sender: self.tx_relay_sender,
            block_assembler_sender,
            verify_queue: Arc::clone(&verify_queue),
            network,
            consensus,
            fee_estimator: self.fee_estimator,
        };

        // Verify manager: drives queued/chunked verification.
        let mut verify_mgr =
            VerifyMgr::new(service.clone(), self.chunk_rx, self.signal_receiver.clone());
        self.handle.spawn(async move { verify_mgr.run().await });

        let mut receiver = self.receiver;
        let mut reorg_receiver = self.reorg_receiver;
        let handle_clone = self.handle.clone();

        // Main message loop: each request is processed on its own task; on
        // exit signal the pool is persisted before the loop ends.
        let process_service = service.clone();
        let signal_receiver = self.signal_receiver.clone();
        self.handle.spawn(async move {
            loop {
                tokio::select! {
                    Some(message) = receiver.recv() => {
                        let service_clone = process_service.clone();
                        handle_clone.spawn(process(service_clone, message));
                    },
                    _ = signal_receiver.cancelled() => {
                        info!("TxPool is saving, please wait...");
                        process_service.save_pool().await;
                        info!("TxPool process_service exit now");
                        break
                    },
                    else => break,
                }
            }
        });

        // Block-assembler update loop (only when mining is configured).
        let process_service = service.clone();
        if let Some(ref block_assembler) = service.block_assembler {
            let signal_receiver = self.signal_receiver.clone();
            let interval = Duration::from_millis(block_assembler.config.update_interval_millis);
            if interval.is_zero() {
                ckb_logger::warn!(
                    "block_assembler.update_interval_millis set to zero interval. \
                This should only be used for tests, as external notification will be disabled."
                );
                // Zero interval: process every message immediately, no batching
                // and no external notification.
                self.handle.spawn(async move {
                    loop {
                        tokio::select! {
                            Some(message) = block_assembler_receiver.recv() => {
                                let service_clone = process_service.clone();
                                block_assembler::process(service_clone, &message).await;
                            },
                            _ = signal_receiver.cancelled() => {
                                info!("TxPool block_assembler process service received exit signal, exit now");
                                break
                            },
                            else => break,
                        }
                    }
                });
            } else {
                // Non-zero interval: batch messages in a dedup queue and flush
                // them on each tick; a Reset bypasses the queue and clears it.
                self.handle.spawn(async move {
                    let mut interval = tokio::time::interval(interval);
                    let mut queue = LinkedHashSet::new();
                    loop {
                        tokio::select! {
                            Some(message) = block_assembler_receiver.recv() => {
                                if let BlockAssemblerMessage::Reset(..) = message {
                                    let service_clone = process_service.clone();
                                    queue.clear();
                                    block_assembler::process(service_clone, &message).await;
                                } else {
                                    queue.insert(message);
                                }
                            },
                            _ = interval.tick() => {
                                for message in &queue {
                                    let service_clone = process_service.clone();
                                    block_assembler::process(service_clone, message).await;
                                }
                                if !queue.is_empty()
                                    && let Some(ref block_assembler) = process_service.block_assembler {
                                    block_assembler.notify().await;
                                }
                                queue.clear();
                            }
                            _ = signal_receiver.cancelled() => {
                                info!("TxPool block_assembler process service received exit signal, exit now");
                                break
                            },
                            else => break,
                        }
                    }
                });
            }
        }

        // Reorg loop: updates the block assembler before and after the pool
        // itself is updated for the chain reorganization.
        let signal_receiver = self.signal_receiver;
        self.handle.spawn(async move {
            loop {
                tokio::select! {
                    Some(message) = reorg_receiver.recv() => {
                        let Notify {
                            arguments: (detached_blocks, attached_blocks, detached_proposal_id, snapshot),
                        } = message;
                        let snapshot_clone = Arc::clone(&snapshot);
                        let detached_blocks_clone = detached_blocks.clone();
                        service.update_block_assembler_before_tx_pool_reorg(
                            detached_blocks_clone,
                            snapshot_clone
                        ).await;

                        let snapshot_clone = Arc::clone(&snapshot);
                        service
                            .update_tx_pool_for_reorg(
                                detached_blocks,
                                attached_blocks,
                                detached_proposal_id,
                                snapshot_clone,
                            )
                            .await;

                        service.update_block_assembler_after_tx_pool_reorg().await;
                    },
                    _ = signal_receiver.cancelled() => {
                        info!("TxPool reorg process service received exit signal, exit now");
                        break
                    },
                    else => break,
                }
            }
        });
        // Mark the service usable, then replay persisted txs through the
        // normal submission path.
        self.started.store(true, Ordering::Release);
        if let Err(err) = self.tx_pool_controller.load_persisted_data(txs) {
            error!("Failed to import persistent txs, cause: {}", err);
        }
    }
}
735
/// Shared state of the tx-pool service; cloned into every spawned task.
/// All mutable pieces are behind `Arc<RwLock<..>>`, so clones are cheap.
#[derive(Clone)]
pub(crate) struct TxPoolService {
    pub(crate) tx_pool: Arc<RwLock<TxPool>>,
    // Txs whose parents are not yet known.
    pub(crate) orphan: Arc<RwLock<OrphanPool>>,
    pub(crate) consensus: Arc<Consensus>,
    pub(crate) tx_pool_config: Arc<TxPoolConfig>,
    // Present only when mining is configured.
    pub(crate) block_assembler: Option<BlockAssembler>,
    pub(crate) txs_verify_cache: Arc<RwLock<TxVerificationCache>>,
    pub(crate) callbacks: Arc<Callbacks>,
    pub(crate) network: NetworkController,
    // Relays verification outcomes to the network layer.
    pub(crate) tx_relay_sender: ckb_channel::Sender<TxVerificationResult>,
    pub(crate) verify_queue: Arc<RwLock<VerifyQueue>>,
    pub(crate) block_assembler_sender: mpsc::Sender<BlockAssemblerMessage>,
    pub(crate) fee_estimator: FeeEstimator,
}
751
/// Outcome of verifying a transaction, relayed to the network layer.
pub enum TxVerificationResult {
    /// Verification succeeded.
    Ok {
        /// Peer the tx came from, or `None` for a locally submitted tx.
        original_peer: Option<PeerIndex>,
        /// Hash of the verified transaction.
        tx_hash: Byte32,
    },
    /// The tx references parents we do not know yet (moved to orphan pool).
    UnknownParents {
        /// Peer that relayed the transaction.
        peer: PeerIndex,
        /// Hashes of the missing parent transactions.
        parents: HashSet<Byte32>,
    },
    /// Verification rejected the transaction.
    Reject {
        /// Hash of the rejected transaction.
        tx_hash: Byte32,
    },
}
774
775#[allow(clippy::cognitive_complexity)]
776async fn process(mut service: TxPoolService, message: Message) {
777 match message {
778 Message::GetTxPoolInfo(Request { responder, .. }) => {
779 let info = service.info().await;
780 if let Err(e) = responder.send(info) {
781 error!("Responder sending get_tx_pool_info failed {:?}", e);
782 };
783 }
784 Message::GetLiveCell(Request {
785 responder,
786 arguments: (out_point, with_data),
787 }) => {
788 let live_cell_status = service.get_live_cell(out_point, with_data).await;
789 if let Err(e) = responder.send(live_cell_status) {
790 error!("Responder sending get_live_cell failed {:?}", e);
791 };
792 }
793 Message::BlockTemplate(Request {
794 responder,
795 arguments: (_bytes_limit, _proposals_limit, _max_version),
796 }) => {
797 let block_template_result = service.get_block_template().await;
798 if let Err(e) = responder.send(block_template_result) {
799 error!("Responder sending block_template_result failed {:?}", e);
800 };
801 }
802 Message::SubmitLocalTx(Request {
803 responder,
804 arguments: tx,
805 }) => {
806 let result = service.process_tx(tx, None).await.map(|_| ());
807 if let Err(e) = responder.send(result) {
808 error!("Responder sending submit_tx result failed {:?}", e);
809 };
810 }
811 Message::SubmitLocalTestTx(Request {
812 responder,
813 arguments: tx,
814 }) => {
815 let result = service.resumeble_process_tx(tx, None).await.map(|_| ());
816 if let Err(e) = responder.send(result) {
817 error!("Responder sending submit_tx result failed {:?}", e);
818 };
819 }
820 Message::RemoveLocalTx(Request {
821 responder,
822 arguments: tx_hash,
823 }) => {
824 let result = service.remove_tx(tx_hash).await;
825 if let Err(e) = responder.send(result) {
826 error!("Responder sending remove_tx result failed {:?}", e);
827 };
828 }
829 Message::TestAcceptTx(Request {
830 responder,
831 arguments: tx,
832 }) => {
833 let result = service.test_accept_tx(tx).await;
834 if let Err(e) = responder.send(result.map(|r| r.into())) {
835 error!("Responder sending test_accept_tx result failed {:?}", e);
836 };
837 }
838 Message::SubmitRemoteTx(Request {
839 responder,
840 arguments: (tx, declared_cycles, peer),
841 }) => {
842 let _result = service
843 .resumeble_process_tx(tx, Some((declared_cycles, peer)))
844 .await;
845 if let Err(e) = responder.send(()) {
846 error!("Responder sending submit_tx result failed {:?}", e);
847 };
848 }
849 Message::NotifyTxs(Notify { arguments: txs }) => {
850 for tx in txs {
851 let _ret = service.resumeble_process_tx(tx, None).await;
852 }
853 }
854 Message::FreshProposalsFilter(AsyncRequest {
855 responder,
856 arguments: mut proposals,
857 }) => {
858 let tx_pool = service.tx_pool.read().await;
859 proposals.retain(|id| !tx_pool.contains_proposal_id(id));
860 if let Err(e) = responder.send(proposals) {
861 error!("Responder sending fresh_proposals_filter failed {:?}", e);
862 };
863 }
864 Message::GetTxStatus(Request {
865 responder,
866 arguments: hash,
867 }) => {
868 let id = ProposalShortId::from_tx_hash(&hash);
869 let tx_pool = service.tx_pool.read().await;
870 let ret = if let Some(PoolEntry {
871 status,
872 inner: entry,
873 ..
874 }) = tx_pool.pool_map.get_by_id(&id)
875 {
876 let status = if status == &Status::Proposed {
877 TxStatus::Proposed
878 } else {
879 TxStatus::Pending
880 };
881 Ok((status, Some(entry.cycles)))
882 } else if let Some(ref recent_reject_db) = tx_pool.recent_reject {
883 let recent_reject_result = recent_reject_db.get(&hash);
884 if let Ok(recent_reject) = recent_reject_result {
885 if let Some(record) = recent_reject {
886 Ok((TxStatus::Rejected(record), None))
887 } else {
888 Ok((TxStatus::Unknown, None))
889 }
890 } else {
891 Err(recent_reject_result.unwrap_err())
892 }
893 } else {
894 Ok((TxStatus::Unknown, None))
895 };
896
897 if let Err(e) = responder.send(ret) {
898 error!("Responder sending get_tx_status failed {:?}", e)
899 };
900 }
901 Message::GetTransactionWithStatus(Request {
902 responder,
903 arguments: hash,
904 }) => {
905 let id = ProposalShortId::from_tx_hash(&hash);
906 let tx_pool = service.tx_pool.read().await;
907 let ret = if let Some(PoolEntry {
908 status,
909 inner: entry,
910 ..
911 }) = tx_pool.pool_map.get_by_id(&id)
912 {
913 let (tx_status, min_replace_fee) = if status == &Status::Proposed {
914 (TxStatus::Proposed, None)
915 } else {
916 (TxStatus::Pending, tx_pool.min_replace_fee(entry))
917 };
918 Ok(TransactionWithStatus::with_status(
919 Some(entry.transaction().clone()),
920 entry.cycles,
921 entry.timestamp,
922 tx_status,
923 Some(entry.fee),
924 min_replace_fee,
925 ))
926 } else if let Some(ref recent_reject_db) = tx_pool.recent_reject {
927 match recent_reject_db.get(&hash) {
928 Ok(Some(record)) => Ok(TransactionWithStatus::with_rejected(record)),
929 Ok(_) => Ok(TransactionWithStatus::with_unknown()),
930 Err(err) => Err(err),
931 }
932 } else {
933 Ok(TransactionWithStatus::with_unknown())
934 };
935
936 if let Err(e) = responder.send(ret) {
937 error!("Responder sending get_tx_status failed {:?}", e)
938 };
939 }
940 Message::FetchTxs(AsyncRequest {
941 responder,
942 arguments: short_ids,
943 }) => {
944 let tx_pool = service.tx_pool.read().await;
945 let orphan = service.orphan.read().await;
946 let txs = short_ids
947 .into_iter()
948 .filter_map(|short_id| {
949 tx_pool
950 .get_tx_from_pool_or_store(&short_id)
951 .or_else(|| orphan.get(&short_id).map(|entry| &entry.tx).cloned())
952 .map(|tx| (short_id, tx))
953 })
954 .collect();
955 if let Err(e) = responder.send(txs) {
956 error!("Responder sending fetch_txs failed {:?}", e);
957 };
958 }
959 Message::FetchTxsWithCycles(AsyncRequest {
960 responder,
961 arguments: short_ids,
962 }) => {
963 let tx_pool = service.tx_pool.read().await;
964 let txs = short_ids
965 .into_iter()
966 .filter_map(|short_id| {
967 tx_pool
968 .get_tx_with_cycles(&short_id)
969 .map(|(tx, cycles)| (short_id, (tx, cycles)))
970 })
971 .collect();
972 if let Err(e) = responder.send(txs) {
973 error!("Responder sending fetch_txs_with_cycles failed {:?}", e);
974 };
975 }
976 Message::NewUncle(Notify { arguments: uncle }) => {
977 service.receive_candidate_uncle(uncle).await;
978 }
979 Message::ClearPool(Request {
980 responder,
981 arguments: new_snapshot,
982 }) => {
983 service.clear_pool(new_snapshot).await;
984 if let Err(e) = responder.send(()) {
985 error!("Responder sending clear_pool failed {:?}", e)
986 };
987 }
988 Message::ClearVerifyQueue(Request { responder, .. }) => {
989 service.verify_queue.write().await.clear();
990 if let Err(e) = responder.send(()) {
991 error!("Responder sending clear_verify_queue failed {:?}", e)
992 };
993 }
994 Message::GetPoolTxDetails(Request {
995 responder,
996 arguments: tx_hash,
997 }) => {
998 let tx_pool = service.tx_pool.read().await;
999 let id = ProposalShortId::from_tx_hash(&tx_hash);
1000 let tx_details = tx_pool
1001 .get_tx_detail(&id)
1002 .unwrap_or(PoolTxDetailInfo::with_unknown());
1003 if let Err(e) = responder.send(tx_details) {
1004 error!("responder send get_pool_tx_details failed {:?}", e)
1005 };
1006 }
1007 Message::GetAllEntryInfo(Request { responder, .. }) => {
1008 let tx_pool = service.tx_pool.read().await;
1009 let info = tx_pool.get_all_entry_info();
1010 if let Err(e) = responder.send(info) {
1011 error!("Responder sending get_all_entry_info failed {:?}", e)
1012 };
1013 }
1014 Message::GetAllIds(Request { responder, .. }) => {
1015 let tx_pool = service.tx_pool.read().await;
1016 let ids = tx_pool.get_ids();
1017 if let Err(e) = responder.send(ids) {
1018 error!("Responder sending get_ids failed {:?}", e)
1019 };
1020 }
1021 Message::SavePool(Request { responder, .. }) => {
1022 service.save_pool().await;
1023 if let Err(e) = responder.send(()) {
1024 error!("Responder sending save_pool failed {:?}", e)
1025 };
1026 }
1027 Message::UpdateIBDState(Request {
1028 responder,
1029 arguments: in_ibd,
1030 }) => {
1031 service.update_ibd_state(in_ibd).await;
1032 if let Err(e) = responder.send(()) {
1033 error!("Responder sending update_ibd_state failed {:?}", e)
1034 };
1035 }
1036 Message::EstimateFeeRate(Request {
1037 responder,
1038 arguments: (estimate_mode, enable_fallback),
1039 }) => {
1040 let fee_estimates_result = service
1041 .estimate_fee_rate(estimate_mode, enable_fallback)
1042 .await;
1043 if let Err(e) = responder.send(fee_estimates_result) {
1044 error!("Responder sending fee_estimates_result failed {:?}", e)
1045 };
1046 }
1047 #[cfg(feature = "internal")]
1048 Message::PlugEntry(Request {
1049 responder,
1050 arguments: (entries, target),
1051 }) => {
1052 service.plug_entry(entries, target).await;
1053
1054 if let Err(e) = responder.send(()) {
1055 error!("Responder sending plug_entry failed {:?}", e);
1056 };
1057 }
1058 #[cfg(feature = "internal")]
1059 Message::PackageTxs(Request {
1060 responder,
1061 arguments: bytes_limit,
1062 }) => {
1063 let max_block_cycles = service.consensus.max_block_cycles();
1064 let max_block_bytes = service.consensus.max_block_bytes();
1065 let tx_pool = service.tx_pool.read().await;
1066 let (txs, _size, _cycles) = tx_pool.package_txs(
1067 max_block_cycles,
1068 bytes_limit.unwrap_or(max_block_bytes) as usize,
1069 );
1070 if let Err(e) = responder.send(txs) {
1071 error!("Responder sending plug_entry failed {:?}", e);
1072 };
1073 }
1074 Message::GetTotalRecentRejectNum(Request { responder, .. }) => {
1075 let total_recent_reject_num = service.get_total_recent_reject_num().await;
1076 if let Err(e) = responder.send(total_recent_reject_num) {
1077 error!("Responder sending total_recent_reject_num failed {:?}", e)
1078 };
1079 }
1080 }
1081}
1082
impl TxPoolService {
    /// Builds a `TxPoolInfo` snapshot of the current pool/orphan/verify-queue
    /// sizes and fee-rate configuration.
    async fn info(&self) -> TxPoolInfo {
        let tx_pool = self.tx_pool.read().await;
        let orphan = self.orphan.read().await;
        let verify_queue = self.verify_queue.read().await;
        let tip_header = tx_pool.snapshot.tip_header();
        TxPoolInfo {
            tip_hash: tip_header.hash(),
            tip_number: tip_header.number(),
            pending_size: tx_pool.pool_map.pending_size(),
            proposed_size: tx_pool.pool_map.proposed_size(),
            orphan_size: orphan.len(),
            total_tx_size: tx_pool.pool_map.total_tx_size,
            total_tx_cycles: tx_pool.pool_map.total_tx_cycles,
            min_fee_rate: self.tx_pool_config.min_fee_rate,
            min_rbf_rate: self.tx_pool_config.min_rbf_rate,
            last_txs_updated_at: tx_pool.pool_map.get_max_update_time(),
            tx_size_limit: TRANSACTION_SIZE_LIMIT,
            max_tx_pool_size: self.tx_pool_config.max_tx_pool_size as u64,
            verify_queue_size: verify_queue.len(),
        }
    }

    /// Estimated count of recently rejected txs; `None` when the
    /// recent-reject database is disabled.
    async fn get_total_recent_reject_num(&self) -> Option<u64> {
        let tx_pool = self.tx_pool.read().await;
        tx_pool
            .recent_reject
            .as_ref()
            .map(|r| r.get_estimate_total_keys_num())
    }

    /// Resolves a cell against the pool overlaid on the snapshot. When
    /// `eager_load` is set, the cell data is loaded from the store up front.
    async fn get_live_cell(&self, out_point: OutPoint, eager_load: bool) -> CellStatus {
        let tx_pool = self.tx_pool.read().await;
        let snapshot = tx_pool.snapshot();
        let pool_cell = PoolCell::new(&tx_pool.pool_map, false);
        let provider = OverlayCellProvider::new(&pool_cell, snapshot);

        match provider.cell(&out_point, false) {
            CellStatus::Live(mut cell_meta) => {
                if eager_load && let Some((data, data_hash)) = snapshot.get_cell_data(&out_point) {
                    cell_meta.mem_cell_data = Some(data);
                    cell_meta.mem_cell_data_hash = Some(data_hash);
                }
                CellStatus::live_cell(cell_meta)
            }
            // Dead cells are reported as Unknown too.
            _ => CellStatus::Unknown,
        }
    }

    /// Whether a block assembler is configured and should receive updates.
    pub fn should_notify_block_assembler(&self) -> bool {
        self.block_assembler.is_some()
    }

    /// Records a candidate uncle and pings the block-assembler loop.
    pub async fn receive_candidate_uncle(&self, uncle: UncleBlockView) {
        if let Some(ref block_assembler) = self.block_assembler {
            // Scope the lock so it is released before notifying the loop.
            {
                block_assembler.candidate_uncles.lock().await.insert(uncle);
            }
            if self
                .block_assembler_sender
                .send(BlockAssemblerMessage::Uncle)
                .await
                .is_err()
            {
                error!("block_assembler receiver dropped");
            }
        }
    }

    /// Pre-reorg block-assembler update: detached blocks become uncle
    /// candidates and a blank template is built against the new snapshot so
    /// miners are not left mining on the old tip.
    pub async fn update_block_assembler_before_tx_pool_reorg(
        &self,
        detached_blocks: VecDeque<BlockView>,
        snapshot: Arc<Snapshot>,
    ) {
        if let Some(ref block_assembler) = self.block_assembler {
            {
                let mut candidate_uncles = block_assembler.candidate_uncles.lock().await;
                for detached_block in detached_blocks {
                    candidate_uncles.insert(detached_block.as_uncle());
                }
            }

            if let Err(e) = block_assembler.update_blank(snapshot).await {
                error!("block_assembler update_blank error {}", e);
            }
            block_assembler.notify().await;
        }
    }

    /// Post-reorg block-assembler update: rebuilds the full template from the
    /// reorganized pool, then notifies subscribers.
    pub async fn update_block_assembler_after_tx_pool_reorg(&self) {
        if let Some(ref block_assembler) = self.block_assembler {
            if let Err(e) = block_assembler.update_full(&self.tx_pool).await {
                error!("block_assembler update failed {:?}", e);
            }
            block_assembler.notify().await;
        }
    }

    /// Directly inserts entries into the pending or proposed set, bypassing
    /// verification, then pings the block assembler (test/internal only).
    ///
    /// # Panics
    /// Panics if an entry cannot be added to the selected set.
    #[cfg(feature = "internal")]
    pub async fn plug_entry(&self, entries: Vec<TxEntry>, target: PlugTarget) {
        {
            let mut tx_pool = self.tx_pool.write().await;
            match target {
                PlugTarget::Pending => {
                    for entry in entries {
                        tx_pool
                            .add_pending(entry)
                            .expect("Plug entry add_pending error");
                    }
                }
                PlugTarget::Proposed => {
                    for entry in entries {
                        tx_pool
                            .add_proposed(entry)
                            .expect("Plug entry add_proposed error");
                    }
                }
            };
        }

        if self.should_notify_block_assembler() {
            let msg = match target {
                PlugTarget::Pending => BlockAssemblerMessage::Pending,
                PlugTarget::Proposed => BlockAssemblerMessage::Proposed,
            };
            if self.block_assembler_sender.send(msg).await.is_err() {
                error!("block_assembler receiver dropped");
            }
        }
    }
}
1215}