1use codec::Encode;
24use futures::{
25 channel::oneshot,
26 future,
27 future::{Future, FutureExt},
28};
29use log::{debug, error, info, trace, warn};
30use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder};
31use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO};
32use sc_transaction_pool_api::{InPoolTransaction, TransactionPool};
33use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi};
34use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend};
35use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal};
36use sp_core::traits::SpawnNamed;
37use sp_inherents::InherentData;
38use sp_runtime::{
39 traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
40 Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion,
41};
42use std::{marker::PhantomData, pin::Pin, sync::Arc, time};
43
44use prometheus_endpoint::Registry as PrometheusRegistry;
45use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics};
46
/// Default block size limit in bytes, used by [`Proposer`] when the caller passes no explicit
/// limit to `propose` (see `apply_extrinsics`). Can be overwritten per factory via
/// [`ProposerFactory::set_default_block_size_limit`].
pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512;
55
/// Default fraction of the remaining proposal time within which we keep *retrying* transactions
/// after some have already been skipped (see the soft-deadline handling in `apply_extrinsics`).
/// Overridable via [`ProposerFactory::set_soft_deadline`].
const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50);
57
/// Log target used by all log/telemetry macros in this module.
// `'static` is redundant on a `const` string slice (clippy::redundant_static_lifetimes).
const LOG_TARGET: &str = "basic-authorship";
59
/// [`Proposer`] factory.
pub struct ProposerFactory<A, C, PR> {
	/// Handle used to spawn the blocking block-building task (see `Proposer::propose`).
	spawn_handle: Box<dyn SpawnNamed>,
	/// The client instance.
	client: Arc<C>,
	/// The transaction pool to pull ready transactions from.
	transaction_pool: Arc<A>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// Block size limit used when `propose` is called without an explicit limit.
	default_block_size_limit: usize,
	/// Fraction of remaining time in which skipped transactions are still retried.
	soft_deadline_percent: Percent,
	/// Optional telemetry handle for "prepared_block_for_proposing" events.
	telemetry: Option<TelemetryHandle>,
	/// When set, the size of the recorded proof is counted towards the block size estimate.
	include_proof_in_block_size_estimation: bool,
	/// Carries the proof-recording mode (`EnableProofRecording`/`DisableProofRecording`).
	_phantom: PhantomData<PR>,
}
88
89impl<A, C, PR> Clone for ProposerFactory<A, C, PR> {
90 fn clone(&self) -> Self {
91 Self {
92 spawn_handle: self.spawn_handle.clone(),
93 client: self.client.clone(),
94 transaction_pool: self.transaction_pool.clone(),
95 metrics: self.metrics.clone(),
96 default_block_size_limit: self.default_block_size_limit,
97 soft_deadline_percent: self.soft_deadline_percent,
98 telemetry: self.telemetry.clone(),
99 include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
100 _phantom: self._phantom,
101 }
102 }
103}
104
105impl<A, C> ProposerFactory<A, C, DisableProofRecording> {
106 pub fn new(
111 spawn_handle: impl SpawnNamed + 'static,
112 client: Arc<C>,
113 transaction_pool: Arc<A>,
114 prometheus: Option<&PrometheusRegistry>,
115 telemetry: Option<TelemetryHandle>,
116 ) -> Self {
117 ProposerFactory {
118 spawn_handle: Box::new(spawn_handle),
119 transaction_pool,
120 metrics: PrometheusMetrics::new(prometheus),
121 default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
122 soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
123 telemetry,
124 client,
125 include_proof_in_block_size_estimation: false,
126 _phantom: PhantomData,
127 }
128 }
129}
130
131impl<A, C> ProposerFactory<A, C, EnableProofRecording> {
132 pub fn with_proof_recording(
139 spawn_handle: impl SpawnNamed + 'static,
140 client: Arc<C>,
141 transaction_pool: Arc<A>,
142 prometheus: Option<&PrometheusRegistry>,
143 telemetry: Option<TelemetryHandle>,
144 ) -> Self {
145 ProposerFactory {
146 client,
147 spawn_handle: Box::new(spawn_handle),
148 transaction_pool,
149 metrics: PrometheusMetrics::new(prometheus),
150 default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
151 soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
152 telemetry,
153 include_proof_in_block_size_estimation: true,
154 _phantom: PhantomData,
155 }
156 }
157
158 pub fn disable_proof_in_block_size_estimation(&mut self) {
160 self.include_proof_in_block_size_estimation = false;
161 }
162}
163
impl<A, C, PR> ProposerFactory<A, C, PR> {
	/// Set the default block size limit in bytes.
	///
	/// The default is [`DEFAULT_BLOCK_SIZE_LIMIT`]. This value is only used when no explicit
	/// limit is passed to [`sp_consensus::Proposer::propose`].
	pub fn set_default_block_size_limit(&mut self, limit: usize) {
		self.default_block_size_limit = limit;
	}

	/// Set the soft-deadline percentage.
	///
	/// The soft deadline marks the point (as a fraction of the remaining proposal time) after
	/// which we stop retrying transactions that exhaust resources or overflow the size limit;
	/// past it, at most `MAX_SKIPPED_TRANSACTIONS` such transactions are skipped before we
	/// conclude the block is full (see `apply_extrinsics`).
	pub fn set_soft_deadline(&mut self, percent: Percent) {
		self.soft_deadline_percent = percent;
	}
}
192
193impl<Block, C, A, PR> ProposerFactory<A, C, PR>
194where
195 A: TransactionPool<Block = Block> + 'static,
196 Block: BlockT,
197 C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + Send + Sync + 'static,
198 C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
199{
200 fn init_with_now(
201 &mut self,
202 parent_header: &<Block as BlockT>::Header,
203 now: Box<dyn Fn() -> time::Instant + Send + Sync>,
204 ) -> Proposer<Block, C, A, PR> {
205 let parent_hash = parent_header.hash();
206
207 info!(
208 "🙌 Starting consensus session on top of parent {:?} (#{})",
209 parent_hash,
210 parent_header.number()
211 );
212
213 let proposer = Proposer::<_, _, _, PR> {
214 spawn_handle: self.spawn_handle.clone(),
215 client: self.client.clone(),
216 parent_hash,
217 parent_number: *parent_header.number(),
218 transaction_pool: self.transaction_pool.clone(),
219 now,
220 metrics: self.metrics.clone(),
221 default_block_size_limit: self.default_block_size_limit,
222 soft_deadline_percent: self.soft_deadline_percent,
223 telemetry: self.telemetry.clone(),
224 _phantom: PhantomData,
225 include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
226 };
227
228 proposer
229 }
230}
231
impl<A, Block, C, PR> sp_consensus::Environment<Block> for ProposerFactory<A, C, PR>
where
	A: TransactionPool<Block = Block> + 'static,
	Block: BlockT,
	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
	PR: ProofRecording,
{
	// Proposer creation never fails, so an immediately-ready future suffices.
	type CreateProposer = future::Ready<Result<Self::Proposer, Self::Error>>;
	type Proposer = Proposer<Block, C, A, PR>;
	type Error = sp_blockchain::Error;

	/// Create a proposer on top of `parent_header`, using the wall clock as time source.
	fn init(&mut self, parent_header: &<Block as BlockT>::Header) -> Self::CreateProposer {
		future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now))))
	}
}
248
/// The proposer logic: builds one block on top of a fixed parent.
pub struct Proposer<Block: BlockT, C, A: TransactionPool, PR> {
	spawn_handle: Box<dyn SpawnNamed>,
	client: Arc<C>,
	/// Hash of the block this proposer builds on top of.
	parent_hash: Block::Hash,
	/// Number of the parent block.
	parent_number: <<Block as BlockT>::Header as HeaderT>::Number,
	transaction_pool: Arc<A>,
	/// Time source; injectable so tests can use a mock clock (see `init_with_now`).
	now: Box<dyn Fn() -> time::Instant + Send + Sync>,
	metrics: PrometheusMetrics,
	/// Block size limit used when `propose` is called without an explicit limit.
	default_block_size_limit: usize,
	/// Whether the recorded proof size is counted in the block size estimate.
	include_proof_in_block_size_estimation: bool,
	/// Fraction of remaining time in which skipped transactions are still retried.
	soft_deadline_percent: Percent,
	telemetry: Option<TelemetryHandle>,
	_phantom: PhantomData<PR>,
}
264
impl<A, Block, C, PR> sp_consensus::Proposer<Block> for Proposer<Block, C, A, PR>
where
	A: TransactionPool<Block = Block> + 'static,
	Block: BlockT,
	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
	PR: ProofRecording,
{
	type Proposal =
		Pin<Box<dyn Future<Output = Result<Proposal<Block, PR::Proof>, Self::Error>> + Send>>;
	type Error = sp_blockchain::Error;
	type ProofRecording = PR;
	type Proof = PR::Proof;

	/// Build a block on top of the configured parent, offloading the actual work to a
	/// blocking task and returning a future that resolves with the finished proposal.
	fn propose(
		self,
		inherent_data: InherentData,
		inherent_digests: Digest,
		max_duration: time::Duration,
		block_size_limit: Option<usize>,
	) -> Self::Proposal {
		// Result is handed back from the blocking task via a oneshot channel.
		let (tx, rx) = oneshot::channel();
		let spawn_handle = self.spawn_handle.clone();

		spawn_handle.spawn_blocking(
			"basic-authorship-proposer",
			None,
			Box::pin(async move {
				// Only two thirds of `max_duration` are spent building; the remaining
				// third is left as head room for the caller (e.g. evaluating/importing
				// the produced block) — TODO confirm intended split.
				let deadline = (self.now)() + max_duration - max_duration / 3;
				let res = self
					.propose_with(inherent_data, inherent_digests, deadline, block_size_limit)
					.await;
				// The receiver may have been dropped if the caller gave up; just trace.
				if tx.send(res).is_err() {
					trace!(
						target: LOG_TARGET,
						"Could not send block production result to proposer!"
					);
				}
			}),
		);

		async move { rx.await? }.boxed()
	}
}
310
/// After the soft deadline, at most this many resource-exhausting / over-size transactions are
/// skipped before `apply_extrinsics` concludes the block is full.
const MAX_SKIPPED_TRANSACTIONS: usize = 8;
315
316impl<A, Block, C, PR> Proposer<Block, C, A, PR>
317where
318 A: TransactionPool<Block = Block>,
319 Block: BlockT,
320 C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
321 C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
322 PR: ProofRecording,
323{
	/// Build the block: set up the block builder, apply inherents, then — unless the runtime
	/// forbids ordinary extrinsics for this block — pull transactions from the pool until a
	/// deadline or limit is hit, and finally seal everything into a [`Proposal`].
	async fn propose_with(
		self,
		inherent_data: InherentData,
		inherent_digests: Digest,
		deadline: time::Instant,
		block_size_limit: Option<usize>,
	) -> Result<Proposal<Block, PR::Proof>, sp_blockchain::Error> {
		let block_timer = time::Instant::now();
		let mut block_builder = BlockBuilderBuilder::new(&*self.client)
			.on_parent_block(self.parent_hash)
			.with_parent_block_number(self.parent_number)
			.with_proof_recording(PR::ENABLED)
			.with_inherent_digests(inherent_digests)
			.build()?;

		self.apply_inherents(&mut block_builder, inherent_data)?;

		// The runtime decides whether ordinary extrinsics may be included in this block.
		let mode = block_builder.extrinsic_inclusion_mode();
		let end_reason = match mode {
			ExtrinsicInclusionMode::AllExtrinsics =>
				self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?,
			ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden,
		};
		let (block, storage_changes, proof) = block_builder.build()?.into_inner();
		let block_took = block_timer.elapsed();

		// Convert the raw proof into the type promised by the proof-recording mode.
		let proof =
			PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?;

		self.print_summary(&block, end_reason, block_took, block_timer.elapsed());
		Ok(Proposal { block, proof, storage_changes })
	}
356
357 fn apply_inherents(
359 &self,
360 block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
361 inherent_data: InherentData,
362 ) -> Result<(), sp_blockchain::Error> {
363 let create_inherents_start = time::Instant::now();
364 let inherents = block_builder.create_inherents(inherent_data)?;
365 let create_inherents_end = time::Instant::now();
366
367 self.metrics.report(|metrics| {
368 metrics.create_inherents_time.observe(
369 create_inherents_end
370 .saturating_duration_since(create_inherents_start)
371 .as_secs_f64(),
372 );
373 });
374
375 for inherent in inherents {
376 match block_builder.push(inherent) {
377 Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
378 warn!(
379 target: LOG_TARGET,
380 "⚠️ Dropping non-mandatory inherent from overweight block."
381 )
382 },
383 Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => {
384 error!(
385 "❌️ Mandatory inherent extrinsic returned error. Block cannot be produced."
386 );
387 return Err(ApplyExtrinsicFailed(Validity(e)))
388 },
389 Err(e) => {
390 warn!(
391 target: LOG_TARGET,
392 "❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e
393 );
394 },
395 Ok(_) => {},
396 }
397 }
398 Ok(())
399 }
400
	/// Apply as many ready pool transactions as possible to the block.
	///
	/// Stops when the pool is drained, the hard `deadline` passes, or — once transactions start
	/// being skipped for exhausting resources or overflowing the size limit — when either
	/// `MAX_SKIPPED_TRANSACTIONS` have been skipped or the soft deadline has passed.
	/// Returns why proposing ended; invalid transactions are removed from the pool.
	async fn apply_extrinsics(
		&self,
		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
		deadline: time::Instant,
		block_size_limit: Option<usize>,
	) -> Result<EndProposingReason, sp_blockchain::Error> {
		// The soft deadline lies `soft_deadline_percent` of the way into the remaining time;
		// it only matters once we start skipping transactions.
		let now = (self.now)();
		let left = deadline.saturating_duration_since(now);
		let left_micros: u64 = left.as_micros().saturated_into();
		let soft_deadline =
			now + time::Duration::from_micros(self.soft_deadline_percent.mul_floor(left_micros));
		let mut skipped = 0;
		let mut unqueue_invalid = Vec::new();

		// Wait at most an eighth of the remaining time for the pool to become ready.
		let delay = deadline.saturating_duration_since((self.now)()) / 8;
		let mut pending_iterator =
			self.transaction_pool.ready_at_with_timeout(self.parent_hash, delay).await;

		let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit);

		debug!(target: LOG_TARGET, "Attempting to push transactions from the pool at {:?}.", self.parent_hash);
		let mut transaction_pushed = false;

		let end_reason = loop {
			let pending_tx = if let Some(pending_tx) = pending_iterator.next() {
				pending_tx
			} else {
				debug!(
					target: LOG_TARGET,
					"No more transactions, proceeding with proposing."
				);

				break EndProposingReason::NoMoreTransactions
			};

			// Hard deadline check once per candidate transaction.
			let now = (self.now)();
			if now > deadline {
				debug!(
					target: LOG_TARGET,
					"Consensus deadline reached when pushing block transactions, \
					proceeding with proposing."
				);
				break EndProposingReason::HitDeadline
			}

			let pending_tx_data = (**pending_tx.data()).clone();
			let pending_tx_hash = pending_tx.hash().clone();

			// Size check first: skip (not fail) transactions that would overflow the limit.
			let block_size =
				block_builder.estimate_block_size(self.include_proof_in_block_size_estimation);
			if block_size + pending_tx_data.encoded_size() > block_size_limit {
				pending_iterator.report_invalid(&pending_tx);
				if skipped < MAX_SKIPPED_TRANSACTIONS {
					skipped += 1;
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
						but will try {} more transactions before quitting.",
						MAX_SKIPPED_TRANSACTIONS - skipped,
					);
					continue
				} else if now < soft_deadline {
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
						but we still have time before the soft deadline, so \
						we will try a bit more."
					);
					continue
				} else {
					debug!(
						target: LOG_TARGET,
						"Reached block size limit, proceeding with proposing."
					);
					break EndProposingReason::HitBlockSizeLimit
				}
			}

			trace!(target: LOG_TARGET, "[{:?}] Pushing to the block.", pending_tx_hash);
			match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) {
				Ok(()) => {
					transaction_pushed = true;
					debug!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash);
				},
				// Resource exhaustion mirrors the size-limit handling above: skip a bounded
				// number of transactions, then give up once past the soft deadline.
				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
					pending_iterator.report_invalid(&pending_tx);
					if skipped < MAX_SKIPPED_TRANSACTIONS {
						skipped += 1;
						debug!(target: LOG_TARGET,
							"Block seems full, but will try {} more transactions before quitting.",
							MAX_SKIPPED_TRANSACTIONS - skipped,
						);
					} else if (self.now)() < soft_deadline {
						debug!(target: LOG_TARGET,
							"Block seems full, but we still have time before the soft deadline, \
							so we will try a bit more before quitting."
						);
					} else {
						debug!(
							target: LOG_TARGET,
							"Reached block weight limit, proceeding with proposing."
						);
						break EndProposingReason::HitBlockWeightLimit
					}
				},
				// Any other error marks the transaction for removal from the pool.
				Err(e) => {
					pending_iterator.report_invalid(&pending_tx);
					debug!(
						target: LOG_TARGET,
						"[{:?}] Invalid transaction: {} at: {}", pending_tx_hash, e, self.parent_hash
					);
					unqueue_invalid.push(pending_tx_hash);
				},
			}
		};

		if matches!(end_reason, EndProposingReason::HitBlockSizeLimit) && !transaction_pushed {
			warn!(
				target: LOG_TARGET,
				"Hit block size limit of `{}` without including any transaction!", block_size_limit,
			);
		}

		self.transaction_pool.remove_invalid(&unqueue_invalid);
		Ok(end_reason)
	}
530
531 fn print_summary(
538 &self,
539 block: &Block,
540 end_reason: EndProposingReason,
541 block_took: time::Duration,
542 propose_took: time::Duration,
543 ) {
544 let extrinsics = block.extrinsics();
545 self.metrics.report(|metrics| {
546 metrics.number_of_transactions.set(extrinsics.len() as u64);
547 metrics.block_constructed.observe(block_took.as_secs_f64());
548 metrics.report_end_proposing_reason(end_reason);
549 metrics.create_block_proposal_time.observe(propose_took.as_secs_f64());
550 });
551
552 let extrinsics_summary = if extrinsics.is_empty() {
553 "no extrinsics".to_string()
554 } else {
555 format!(
556 "extrinsics ({}): [{}]",
557 extrinsics.len(),
558 extrinsics
559 .iter()
560 .map(|xt| BlakeTwo256::hash_of(xt).to_string())
561 .collect::<Vec<_>>()
562 .join(", ")
563 )
564 };
565
566 if log::log_enabled!(log::Level::Info) {
567 info!(
568 "🎁 Prepared block for proposing at {} ({} ms) [hash: {:?}; parent_hash: {}; extrinsics_count: {}",
569 block.header().number(),
570 block_took.as_millis(),
571 <Block as BlockT>::Hash::from(block.header().hash()),
572 block.header().parent_hash(),
573 extrinsics.len()
574 )
575 } else if log::log_enabled!(log::Level::Debug) {
576 debug!(
577 "🎁 Prepared block for proposing at {} ({} ms) [hash: {:?}; parent_hash: {}; {extrinsics_summary}",
578 block.header().number(),
579 block_took.as_millis(),
580 <Block as BlockT>::Hash::from(block.header().hash()),
581 block.header().parent_hash(),
582 );
583 }
584
585 telemetry!(
586 self.telemetry;
587 CONSENSUS_INFO;
588 "prepared_block_for_proposing";
589 "number" => ?block.header().number(),
590 "hash" => ?<Block as BlockT>::Hash::from(block.header().hash()),
591 );
592 }
593}
594
595#[cfg(test)]
596mod tests {
597 use super::*;
598
599 use futures::executor::block_on;
600 use parking_lot::Mutex;
601 use sc_client_api::Backend;
602 use sc_transaction_pool::BasicPool;
603 use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource};
604 use sp_api::Core;
605 use sp_blockchain::HeaderBackend;
606 use sp_consensus::{BlockOrigin, Environment, Proposer};
607 use sp_runtime::{generic::BlockId, traits::NumberFor, Perbill};
608 use substrate_test_runtime_client::{
609 prelude::*,
610 runtime::{Block as TestBlock, Extrinsic, ExtrinsicBuilder, Transfer},
611 TestClientBuilder, TestClientBuilderExt,
612 };
613
	const SOURCE: TransactionSource = TransactionSource::External;

	// `fill_block` `Perbill` parts used to build extrinsics of various weights.
	const HUGE: u32 = 649000000;
	const MEDIUM: u32 = 250000000;
	const TINY: u32 = 1000;
627
	/// Build a tiny-weight `fill_block` extrinsic with the given `nonce`.
	fn extrinsic(nonce: u64) -> Extrinsic {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
	}
631
	/// Wrap a header into a `NewBestBlock` event for driving `txpool.maintain`.
	fn chain_event<B: BlockT>(header: B::Header) -> ChainEvent<B>
	where
		NumberFor<B>: From<u64>,
	{
		ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }
	}
638
	#[test]
	fn should_cease_building_block_when_deadline_is_reached() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let hashof0 = client.info().genesis_hash;
		block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client.expect_header(hashof0).expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mock clock: the first call returns the base instant unchanged; every later call
		// returns the stored instant and advances it by 1s, so the proposer runs out of
		// time after only a few clock reads.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(hashof0).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1
				}
				let old = value.1;
				let new = old + time::Duration::from_secs(1);
				*value = (true, new);
				old
			}),
		);

		// when
		let deadline = time::Duration::from_secs(3);
		let block =
			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
				.map(|r| r.block)
				.unwrap();

		// then
		// The deadline fires after one extrinsic was pushed; both submitted extrinsics
		// stay in the pool (the block was not imported/maintained).
		assert_eq!(block.extrinsics().len(), 1);
		assert_eq!(txpool.ready().count(), 2);
	}
692
	#[test]
	fn should_not_panic_when_deadline_is_reached() {
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mock clock that jumps far (160s) past the 1s deadline on the second call;
		// proposing must still complete without panicking.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(client.info().genesis_hash).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1
				}
				let new = value.1 + time::Duration::from_secs(160);
				*value = (true, new);
				new
			}),
		);

		let deadline = time::Duration::from_secs(1);
		block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
			.map(|r| r.block)
			.unwrap();
	}
728
	#[test]
	fn proposed_storage_changes_should_match_execute_block_storage_changes() {
		let (client, backend) = TestClientBuilder::new().build_with_backend();
		let client = Arc::new(client);
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let genesis_hash = client.info().best_hash;

		block_on(txpool.submit_at(genesis_hash, SOURCE, vec![extrinsic(0)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		let proposer = proposer_factory.init_with_now(
			&client.header(genesis_hash).unwrap().unwrap(),
			Box::new(move || time::Instant::now()),
		);

		let deadline = time::Duration::from_secs(9);
		let proposal =
			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
				.unwrap();

		assert_eq!(proposal.block.extrinsics().len(), 1);

		// Re-execute the proposed block through the runtime API and check that the storage
		// root the proposer reported matches the root of an independent execution.
		let api = client.runtime_api();
		api.execute_block(genesis_hash, proposal.block).unwrap();

		let state = backend.state_at(genesis_hash).unwrap();

		let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap();

		assert_eq!(
			proposal.storage_changes.transaction_storage_root,
			storage_changes.transaction_storage_root,
		);
	}
781
	#[test]
	fn should_not_remove_invalid_transactions_from_the_same_sender_after_one_was_invalid() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		// Medium ~25% and huge ~65% fill-block extrinsics, all from the same sender,
		// so later nonces depend on the earlier (sometimes unincludable) ones.
		let medium = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(MEDIUM))
				.nonce(nonce)
				.build()
		};
		let huge = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)).nonce(nonce).build()
		};

		block_on(txpool.submit_at(
			client.info().genesis_hash,
			SOURCE,
			vec![medium(0), medium(1), huge(2), medium(3), huge(4), medium(5), medium(6)],
		))
		.unwrap();

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
		// Propose a block on top of `parent_number` and assert both the number of
		// extrinsics it contains and how many transactions remain ready in the pool.
		let mut propose_block = |client: &TestClient,
		                         parent_number,
		                         expected_block_extrinsics,
		                         expected_pool_transactions| {
			let hash = client.expect_block_hash_from_id(&BlockId::Number(parent_number)).unwrap();
			let proposer = proposer_factory.init_with_now(
				&client.expect_header(hash).unwrap(),
				Box::new(move || time::Instant::now()),
			);

			let deadline = time::Duration::from_secs(900);
			let block =
				block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
					.map(|r| r.block)
					.unwrap();

			assert_eq!(
				txpool.ready().count(),
				expected_pool_transactions,
				"at block: {}",
				block.header.number
			);
			assert_eq!(
				block.extrinsics().len(),
				expected_block_extrinsics,
				"at block: {}",
				block.header.number
			);

			block
		};

		// Import the proposed block and let the pool prune the included transactions.
		let import_and_maintain = |client: Arc<TestClient>, block: TestBlock| {
			let hash = block.hash();
			block_on(client.import(BlockOrigin::Own, block)).unwrap();
			block_on(txpool.maintain(chain_event(
				client.expect_header(hash).expect("there should be header"),
			)));
		};

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);
		assert_eq!(txpool.ready().count(), 7);

		// Each proposed block consumes what fits; transactions that exhaust resources
		// stay in the pool for the next block rather than being dropped as invalid.
		let block = propose_block(&client, 0, 2, 7);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 5);

		let block = propose_block(&client, 1, 1, 5);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 4);

		let block = propose_block(&client, 2, 1, 4);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 3);

		let block = propose_block(&client, 3, 1, 3);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 2);

		let block = propose_block(&client, 4, 2, 2);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 0);
	}
893
	#[test]
	fn should_cease_building_block_when_block_limit_is_reached() {
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));
		let genesis_hash = client.info().genesis_hash;
		let genesis_header = client.expect_header(genesis_hash).expect("there should be header");

		let extrinsics_num = 5;
		let extrinsics = std::iter::once(
			Transfer {
				from: AccountKeyring::Alice.into(),
				to: AccountKeyring::Bob.into(),
				amount: 100,
				nonce: 0,
			}
			.into_unchecked_extrinsic(),
		)
		.chain((1..extrinsics_num as u64).map(extrinsic))
		.collect::<Vec<_>>();

		// Size limit that fits the header, the extrinsics-vec overhead and all but the
		// last extrinsic — so exactly one transaction must be left out.
		let block_limit = genesis_header.encoded_size() +
			extrinsics
				.iter()
				.take(extrinsics_num - 1)
				.map(Encode::encoded_size)
				.sum::<usize>() +
			Vec::<Extrinsic>::new().encoded_size();

		block_on(txpool.submit_at(genesis_hash, SOURCE, extrinsics.clone())).unwrap();

		block_on(txpool.maintain(chain_event(genesis_header.clone())));

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

		let deadline = time::Duration::from_secs(300);
		let block = block_on(proposer.propose(
			Default::default(),
			Default::default(),
			deadline,
			Some(block_limit),
		))
		.map(|r| r.block)
		.unwrap();

		// One extrinsic did not fit under the explicit limit.
		assert_eq!(block.extrinsics().len(), extrinsics_num - 1);

		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

		// Without an explicit limit the default limit applies and everything fits.
		let block =
			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
				.map(|r| r.block)
				.unwrap();

		assert_eq!(block.extrinsics().len(), extrinsics_num);

		// With proof recording enabled, the proof size also counts towards the estimate.
		let mut proposer_factory = ProposerFactory::with_proof_recording(
			spawner.clone(),
			client.clone(),
			txpool.clone(),
			None,
			None,
		);

		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

		// Limit sized from an empty proof-recording builder plus one extrinsic, so only
		// a single transaction can be included.
		let block_limit = {
			let builder = BlockBuilderBuilder::new(&*client)
				.on_parent_block(genesis_header.hash())
				.with_parent_block_number(0)
				.enable_proof_recording()
				.build()
				.unwrap();
			builder.estimate_block_size(true) + extrinsics[0].encoded_size()
		};
		let block = block_on(proposer.propose(
			Default::default(),
			Default::default(),
			deadline,
			Some(block_limit),
		))
		.map(|r| r.block)
		.unwrap();

		assert_eq!(block.extrinsics().len(), 1);
	}
997
	#[test]
	fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));
		let genesis_hash = client.info().genesis_hash;

		let tiny = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
		};
		let huge = |who| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
				.signer(AccountKeyring::numeric(who))
				.build()
		};

		block_on(
			txpool.submit_at(
				genesis_hash,
				SOURCE,
				// Many resource-exhausting extrinsics in front of the tiny ones.
				(0..MAX_SKIPPED_TRANSACTIONS * 2)
					.into_iter()
					.map(huge)
					.chain((0..MAX_SKIPPED_TRANSACTIONS as u64).into_iter().map(tiny))
					.collect(),
			),
		)
		.unwrap();

		block_on(txpool.maintain(chain_event(
			client.expect_header(genesis_hash).expect("there should be header"),
		)));
		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mock clock advancing 1s per call: with a 900s deadline, the soft deadline is
		// never reached, so skipped transactions keep being retried.
		let cell = Mutex::new(time::Instant::now());
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(genesis_hash).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				let old = *value;
				*value = old + time::Duration::from_secs(1);
				old
			}),
		);

		// when
		let deadline = time::Duration::from_secs(900);
		let block =
			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
				.map(|r| r.block)
				.unwrap();

		// then
		// Expected: one huge extrinsic fills most of the block, the remaining huge ones
		// are skipped, and all the tiny ones still fit — TODO confirm weight assumptions.
		assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1);
	}
1066
	#[test]
	fn should_only_skip_up_to_some_limit_after_soft_deadline() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));
		let genesis_hash = client.info().genesis_hash;

		let tiny = |who| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY))
				.signer(AccountKeyring::numeric(who))
				.nonce(1)
				.build()
		};
		let huge = |who| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
				.signer(AccountKeyring::numeric(who))
				.build()
		};

		block_on(
			txpool.submit_at(
				genesis_hash,
				SOURCE,
				(0..MAX_SKIPPED_TRANSACTIONS + 2)
					.into_iter()
					.map(huge)
					.chain((0..MAX_SKIPPED_TRANSACTIONS + 2).into_iter().map(tiny))
					.collect(),
			),
		)
		.unwrap();

		block_on(txpool.maintain(chain_event(
			client.expect_header(genesis_hash).expect("there should be header"),
		)));
		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 4);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		let deadline = time::Duration::from_secs(600);
		// Mock clock: the second call jumps half the deadline forward, pushing time past
		// the soft deadline so only `MAX_SKIPPED_TRANSACTIONS` skips are allowed.
		// `cell2` lets the test inspect how many times the clock was read.
		let cell = Arc::new(Mutex::new((0, time::Instant::now())));
		let cell2 = cell.clone();
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(genesis_hash).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				let (called, old) = *value;
				let increase = if called == 1 {
					deadline / 2
				} else {
					time::Duration::from_millis(0)
				};
				*value = (called + 1, old + increase);
				old
			}),
		);

		let block =
			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
				.map(|r| r.block)
				.unwrap();

		assert!(
			(1..3).contains(&block.extrinsics().len()),
			"Block shall contain one or two extrinsics."
		);
		assert!(
			cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS,
			"Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline"
		);
	}
1153}