// sc_basic_authorship/basic_authorship.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! A consensus proposer for "basic" chains which use the primitive inherent-data.
20
21// FIXME #1021 move this into sp-consensus
22
23use codec::Encode;
24use futures::{
25	channel::oneshot,
26	future,
27	future::{Future, FutureExt},
28};
29use log::{debug, error, info, trace, warn};
30use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder};
31use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO};
32use sc_transaction_pool_api::{InPoolTransaction, TransactionPool, TxInvalidityReportMap};
33use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi};
34use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend};
35use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal};
36use sp_core::traits::SpawnNamed;
37use sp_inherents::InherentData;
38use sp_runtime::{
39	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
40	Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion,
41};
42use std::{marker::PhantomData, pin::Pin, sync::Arc, time};
43
44use prometheus_endpoint::Registry as PrometheusRegistry;
45use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics};
46
/// Default block size limit in bytes used by [`Proposer`].
///
/// Can be overwritten by [`ProposerFactory::set_default_block_size_limit`].
///
/// Be aware that there is also an upper packet size on what the networking code
/// will accept. If the block doesn't fit in such a package, it can not be
/// transferred to other nodes.
pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512;

/// Default soft deadline: stop trying resource-exhausting transactions once half of the
/// hard deadline has elapsed. See [`ProposerFactory::set_soft_deadline`] for details.
const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50);
57
/// Logging target for all messages emitted by this crate.
// `'static` is implied on a `const`'s reference type (clippy: `redundant_static_lifetimes`).
const LOG_TARGET: &str = "basic-authorship";
59
/// [`Proposer`] factory.
pub struct ProposerFactory<A, C, PR> {
	/// Handle used to spawn the blocking block-production task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// The client instance.
	client: Arc<C>,
	/// The transaction pool.
	transaction_pool: Arc<A>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// The default block size limit.
	///
	/// If no `block_size_limit` is passed to [`sp_consensus::Proposer::propose`], this block size
	/// limit will be used.
	default_block_size_limit: usize,
	/// Soft deadline percentage of hard deadline.
	///
	/// The value is used to compute soft deadline during block production.
	/// The soft deadline indicates where we should stop attempting to add transactions
	/// to the block, which exhaust resources. After soft deadline is reached,
	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
	/// transactions which exhaust resources, we will conclude that the block is full.
	soft_deadline_percent: Percent,
	/// Telemetry handle used to report prepared blocks (if any).
	telemetry: Option<TelemetryHandle>,
	/// When estimating the block size, should the proof be included?
	include_proof_in_block_size_estimation: bool,
	/// phantom member to pin the `ProofRecording` type.
	_phantom: PhantomData<PR>,
}
88
// Manual `Clone` implementation: deriving it would add unwanted `A: Clone`,
// `C: Clone` and `PR: Clone` bounds, although only handles/`Arc`s are cloned here.
impl<A, C, PR> Clone for ProposerFactory<A, C, PR> {
	fn clone(&self) -> Self {
		Self {
			spawn_handle: self.spawn_handle.clone(),
			client: self.client.clone(),
			transaction_pool: self.transaction_pool.clone(),
			metrics: self.metrics.clone(),
			default_block_size_limit: self.default_block_size_limit,
			soft_deadline_percent: self.soft_deadline_percent,
			telemetry: self.telemetry.clone(),
			include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
			_phantom: self._phantom,
		}
	}
}
104
105impl<A, C> ProposerFactory<A, C, DisableProofRecording> {
106	/// Create a new proposer factory.
107	///
108	/// Proof recording will be disabled when using proposers built by this instance to build
109	/// blocks.
110	pub fn new(
111		spawn_handle: impl SpawnNamed + 'static,
112		client: Arc<C>,
113		transaction_pool: Arc<A>,
114		prometheus: Option<&PrometheusRegistry>,
115		telemetry: Option<TelemetryHandle>,
116	) -> Self {
117		ProposerFactory {
118			spawn_handle: Box::new(spawn_handle),
119			transaction_pool,
120			metrics: PrometheusMetrics::new(prometheus),
121			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
122			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
123			telemetry,
124			client,
125			include_proof_in_block_size_estimation: false,
126			_phantom: PhantomData,
127		}
128	}
129}
130
131impl<A, C> ProposerFactory<A, C, EnableProofRecording> {
132	/// Create a new proposer factory with proof recording enabled.
133	///
134	/// Each proposer created by this instance will record a proof while building a block.
135	///
136	/// This will also include the proof into the estimation of the block size. This can be disabled
137	/// by calling [`ProposerFactory::disable_proof_in_block_size_estimation`].
138	pub fn with_proof_recording(
139		spawn_handle: impl SpawnNamed + 'static,
140		client: Arc<C>,
141		transaction_pool: Arc<A>,
142		prometheus: Option<&PrometheusRegistry>,
143		telemetry: Option<TelemetryHandle>,
144	) -> Self {
145		ProposerFactory {
146			client,
147			spawn_handle: Box::new(spawn_handle),
148			transaction_pool,
149			metrics: PrometheusMetrics::new(prometheus),
150			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
151			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
152			telemetry,
153			include_proof_in_block_size_estimation: true,
154			_phantom: PhantomData,
155		}
156	}
157
158	/// Disable the proof inclusion when estimating the block size.
159	pub fn disable_proof_in_block_size_estimation(&mut self) {
160		self.include_proof_in_block_size_estimation = false;
161	}
162}
163
impl<A, C, PR> ProposerFactory<A, C, PR> {
	/// Set the default block size limit in bytes.
	///
	/// The default value for the block size limit is:
	/// [`DEFAULT_BLOCK_SIZE_LIMIT`].
	///
	/// If there is no block size limit passed to [`sp_consensus::Proposer::propose`], this value
	/// will be used.
	///
	/// Note that the networking layer imposes its own upper packet size (see the
	/// caveat on [`DEFAULT_BLOCK_SIZE_LIMIT`]).
	pub fn set_default_block_size_limit(&mut self, limit: usize) {
		self.default_block_size_limit = limit;
	}

	/// Set soft deadline percentage.
	///
	/// The value is used to compute soft deadline during block production.
	/// The soft deadline indicates where we should stop attempting to add transactions
	/// to the block, which exhaust resources. After soft deadline is reached,
	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
	/// transactions which exhaust resources, we will conclude that the block is full.
	///
	/// Setting the value too low will significantly limit the amount of transactions
	/// we try in case they exhaust resources. Setting the value too high can
	/// potentially open a DoS vector, where many "exhaust resources" transactions
	/// are being tried with no success, hence block producer ends up creating an empty block.
	pub fn set_soft_deadline(&mut self, percent: Percent) {
		self.soft_deadline_percent = percent;
	}
}
192
193impl<Block, C, A, PR> ProposerFactory<A, C, PR>
194where
195	A: TransactionPool<Block = Block> + 'static,
196	Block: BlockT,
197	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + Send + Sync + 'static,
198	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
199{
200	fn init_with_now(
201		&mut self,
202		parent_header: &<Block as BlockT>::Header,
203		now: Box<dyn Fn() -> time::Instant + Send + Sync>,
204	) -> Proposer<Block, C, A, PR> {
205		let parent_hash = parent_header.hash();
206
207		info!(
208			"🙌 Starting consensus session on top of parent {:?} (#{})",
209			parent_hash,
210			parent_header.number()
211		);
212
213		let proposer = Proposer::<_, _, _, PR> {
214			spawn_handle: self.spawn_handle.clone(),
215			client: self.client.clone(),
216			parent_hash,
217			parent_number: *parent_header.number(),
218			transaction_pool: self.transaction_pool.clone(),
219			now,
220			metrics: self.metrics.clone(),
221			default_block_size_limit: self.default_block_size_limit,
222			soft_deadline_percent: self.soft_deadline_percent,
223			telemetry: self.telemetry.clone(),
224			_phantom: PhantomData,
225			include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
226		};
227
228		proposer
229	}
230}
231
impl<A, Block, C, PR> sp_consensus::Environment<Block> for ProposerFactory<A, C, PR>
where
	A: TransactionPool<Block = Block> + 'static,
	Block: BlockT,
	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
	PR: ProofRecording,
{
	// Proposer creation is synchronous and infallible, hence an already-ready future.
	type CreateProposer = future::Ready<Result<Self::Proposer, Self::Error>>;
	type Proposer = Proposer<Block, C, A, PR>;
	type Error = sp_blockchain::Error;

	fn init(&mut self, parent_header: &<Block as BlockT>::Header) -> Self::CreateProposer {
		// Production always uses the real clock; tests inject a mock via `init_with_now`.
		future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now))))
	}
}
248
/// The proposer logic.
pub struct Proposer<Block: BlockT, C, A: TransactionPool, PR> {
	/// Handle used to spawn the blocking block-production task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// The client instance.
	client: Arc<C>,
	/// Hash of the block we build on top of.
	parent_hash: Block::Hash,
	/// Number of the parent block.
	parent_number: <<Block as BlockT>::Header as HeaderT>::Number,
	/// The transaction pool ready transactions are taken from.
	transaction_pool: Arc<A>,
	/// Clock source; injectable for tests (see `ProposerFactory::init_with_now`).
	now: Box<dyn Fn() -> time::Instant + Send + Sync>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// Block size limit used when no limit is passed to `propose`.
	default_block_size_limit: usize,
	/// When estimating the block size, should the proof be included?
	include_proof_in_block_size_estimation: bool,
	/// Soft deadline percentage of hard deadline (see [`ProposerFactory`]).
	soft_deadline_percent: Percent,
	/// Telemetry handle used to report prepared blocks (if any).
	telemetry: Option<TelemetryHandle>,
	/// Phantom member to pin the `ProofRecording` type.
	_phantom: PhantomData<PR>,
}
264
impl<A, Block, C, PR> sp_consensus::Proposer<Block> for Proposer<Block, C, A, PR>
where
	A: TransactionPool<Block = Block> + 'static,
	Block: BlockT,
	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
	PR: ProofRecording,
{
	type Proposal =
		Pin<Box<dyn Future<Output = Result<Proposal<Block, PR::Proof>, Self::Error>> + Send>>;
	type Error = sp_blockchain::Error;
	type ProofRecording = PR;
	type Proof = PR::Proof;

	// Runs the actual block building on a dedicated blocking task and forwards the
	// result through a oneshot channel, so the returned future stays lightweight.
	fn propose(
		self,
		inherent_data: InherentData,
		inherent_digests: Digest,
		max_duration: time::Duration,
		block_size_limit: Option<usize>,
	) -> Self::Proposal {
		let (tx, rx) = oneshot::channel();
		// Clone the handle up front: `self` is moved into the spawned future below.
		let spawn_handle = self.spawn_handle.clone();

		spawn_handle.spawn_blocking(
			"basic-authorship-proposer",
			None,
			Box::pin(async move {
				// leave some time for evaluation and block finalization (10%)
				let deadline = (self.now)() + max_duration - max_duration / 10;
				let res = self
					.propose_with(inherent_data, inherent_digests, deadline, block_size_limit)
					.await;
				// The receiver may have been dropped if the caller gave up on the
				// proposal; that is not an error, so only trace it.
				if tx.send(res).is_err() {
					trace!(
						target: LOG_TARGET,
						"Could not send block production result to proposer!"
					);
				}
			}),
		);

		async move { rx.await? }.boxed()
	}
}
310
/// If the block is full we will attempt to push at most
/// this number of transactions before quitting for real.
/// It allows us to increase block utilization.
///
/// Only applies after the soft deadline has passed; before that, "exhaust
/// resources" transactions are skipped without counting.
const MAX_SKIPPED_TRANSACTIONS: usize = 8;
315
316impl<A, Block, C, PR> Proposer<Block, C, A, PR>
317where
318	A: TransactionPool<Block = Block>,
319	Block: BlockT,
320	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
321	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
322	PR: ProofRecording,
323{
	/// Build the block: create the block builder, apply inherents, then (unless the
	/// runtime forbids user extrinsics for this block) pool transactions, and finally
	/// seal the block, returning it together with storage changes and optional proof.
	async fn propose_with(
		self,
		inherent_data: InherentData,
		inherent_digests: Digest,
		deadline: time::Instant,
		block_size_limit: Option<usize>,
	) -> Result<Proposal<Block, PR::Proof>, sp_blockchain::Error> {
		let block_timer = time::Instant::now();
		let mut block_builder = BlockBuilderBuilder::new(&*self.client)
			.on_parent_block(self.parent_hash)
			.with_parent_block_number(self.parent_number)
			.with_proof_recording(PR::ENABLED)
			.with_inherent_digests(inherent_digests)
			.build()?;

		self.apply_inherents(&mut block_builder, inherent_data)?;

		// The runtime decides (per block) whether user extrinsics may be included.
		let mode = block_builder.extrinsic_inclusion_mode();
		let end_reason = match mode {
			ExtrinsicInclusionMode::AllExtrinsics =>
				self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?,
			ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden,
		};
		let (block, storage_changes, proof) = block_builder.build()?.into_inner();
		let block_took = block_timer.elapsed();

		// With proof recording disabled this is a no-op conversion.
		let proof =
			PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?;

		self.print_summary(&block, end_reason, block_took, block_timer.elapsed());
		Ok(Proposal { block, proof, storage_changes })
	}
356
	/// Apply all inherents to the block.
	///
	/// Inherents that fail are dropped with a warning, except a failing mandatory
	/// inherent, which aborts block production with an error.
	fn apply_inherents(
		&self,
		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
		inherent_data: InherentData,
	) -> Result<(), sp_blockchain::Error> {
		// Time inherent creation separately so it shows up in the metrics.
		let create_inherents_start = time::Instant::now();
		let inherents = block_builder.create_inherents(inherent_data)?;
		let create_inherents_end = time::Instant::now();

		self.metrics.report(|metrics| {
			metrics.create_inherents_time.observe(
				create_inherents_end
					.saturating_duration_since(create_inherents_start)
					.as_secs_f64(),
			);
		});

		for inherent in inherents {
			match block_builder.push(inherent) {
				// NOTE(review): this arm is matched before `was_mandatory`, so a mandatory
				// inherent that exhausts resources would also be dropped here despite the
				// "non-mandatory" wording — confirm this ordering is intended.
				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
					warn!(
						target: LOG_TARGET,
						"⚠️  Dropping non-mandatory inherent from overweight block."
					)
				},
				Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => {
					error!(
						"❌️ Mandatory inherent extrinsic returned error. Block cannot be produced."
					);
					return Err(ApplyExtrinsicFailed(Validity(e)))
				},
				// Any other error: drop the inherent and keep building the block.
				Err(e) => {
					warn!(
						target: LOG_TARGET,
						"❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e
					);
				},
				Ok(_) => {},
			}
		}
		Ok(())
	}
400
	/// Apply as many extrinsics as possible to the block.
	///
	/// Pulls ready transactions from the pool until either the deadline, the block
	/// size limit or the block weight limit is hit, and returns the reason why
	/// proposing ended. Transactions found invalid are reported back to the pool.
	async fn apply_extrinsics(
		&self,
		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
		deadline: time::Instant,
		block_size_limit: Option<usize>,
	) -> Result<EndProposingReason, sp_blockchain::Error> {
		// proceed with transactions
		// We calculate soft deadline used only in case we start skipping transactions.
		let now = (self.now)();
		let left = deadline.saturating_duration_since(now);
		let left_micros: u64 = left.as_micros().saturated_into();
		let soft_deadline =
			now + time::Duration::from_micros(self.soft_deadline_percent.mul_floor(left_micros));
		let mut skipped = 0;
		let mut unqueue_invalid = TxInvalidityReportMap::new();

		// Wait at most 1/8 of the remaining time for the pool to become ready.
		let delay = deadline.saturating_duration_since((self.now)()) / 8;
		let mut pending_iterator =
			self.transaction_pool.ready_at_with_timeout(self.parent_hash, delay).await;

		let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit);

		debug!(target: LOG_TARGET, "Attempting to push transactions from the pool at {:?}.", self.parent_hash);
		let mut transaction_pushed = false;

		let end_reason = loop {
			let pending_tx = if let Some(pending_tx) = pending_iterator.next() {
				pending_tx
			} else {
				debug!(
					target: LOG_TARGET,
					"No more transactions, proceeding with proposing."
				);

				break EndProposingReason::NoMoreTransactions
			};

			// Re-check the hard deadline on every iteration.
			let now = (self.now)();
			if now > deadline {
				debug!(
					target: LOG_TARGET,
					"Consensus deadline reached when pushing block transactions, \
				proceeding with proposing."
				);
				break EndProposingReason::HitDeadline
			}

			let pending_tx_data = (**pending_tx.data()).clone();
			let pending_tx_hash = pending_tx.hash().clone();

			// Size check before even attempting to push: would this transaction
			// overflow the byte-size limit of the block?
			let block_size =
				block_builder.estimate_block_size(self.include_proof_in_block_size_estimation);
			if block_size + pending_tx_data.encoded_size() > block_size_limit {
				pending_iterator.report_invalid(&pending_tx);
				if skipped < MAX_SKIPPED_TRANSACTIONS {
					skipped += 1;
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
					 but will try {} more transactions before quitting.",
						MAX_SKIPPED_TRANSACTIONS - skipped,
					);
					continue
				} else if now < soft_deadline {
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
					 but we still have time before the soft deadline, so \
					 we will try a bit more."
					);
					continue
				} else {
					debug!(
						target: LOG_TARGET,
						"Reached block size limit, proceeding with proposing."
					);
					break EndProposingReason::HitBlockSizeLimit
				}
			}

			trace!(target: LOG_TARGET, "[{:?}] Pushing to the block.", pending_tx_hash);
			match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) {
				Ok(()) => {
					transaction_pushed = true;
					trace!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash);
				},
				// Same skip/soft-deadline policy as above, but for the weight limit.
				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
					pending_iterator.report_invalid(&pending_tx);
					if skipped < MAX_SKIPPED_TRANSACTIONS {
						skipped += 1;
						debug!(target: LOG_TARGET,
							"Block seems full, but will try {} more transactions before quitting.",
							MAX_SKIPPED_TRANSACTIONS - skipped,
						);
					} else if (self.now)() < soft_deadline {
						debug!(target: LOG_TARGET,
							"Block seems full, but we still have time before the soft deadline, \
							 so we will try a bit more before quitting."
						);
					} else {
						debug!(
							target: LOG_TARGET,
							"Reached block weight limit, proceeding with proposing."
						);
						break EndProposingReason::HitBlockWeightLimit
					}
				},
				// Genuinely invalid transaction: remember it so the pool can be told.
				Err(e) => {
					pending_iterator.report_invalid(&pending_tx);
					debug!(
						target: LOG_TARGET,
						"[{:?}] Invalid transaction: {} at: {}", pending_tx_hash, e, self.parent_hash
					);

					let error_to_report = match e {
						ApplyExtrinsicFailed(Validity(e)) => Some(e),
						_ => None,
					};

					unqueue_invalid.insert(pending_tx_hash, error_to_report);
				},
			}
		};

		if matches!(end_reason, EndProposingReason::HitBlockSizeLimit) && !transaction_pushed {
			warn!(
				target: LOG_TARGET,
				"Hit block size limit of `{}` without including any transaction!", block_size_limit,
			);
		}

		// Tell the pool about everything we found to be invalid while building.
		self.transaction_pool
			.report_invalid(Some(self.parent_hash), unqueue_invalid)
			.await;
		Ok(end_reason)
	}
538
539	/// Prints a summary and does telemetry + metrics.
540	///
541	/// - `block`: The block that was build.
542	/// - `end_reason`: Why did we stop producing the block?
543	/// - `block_took`: How long did it took to produce the actual block?
544	/// - `propose_took`: How long did the entire proposing took?
545	fn print_summary(
546		&self,
547		block: &Block,
548		end_reason: EndProposingReason,
549		block_took: time::Duration,
550		propose_took: time::Duration,
551	) {
552		let extrinsics = block.extrinsics();
553		self.metrics.report(|metrics| {
554			metrics.number_of_transactions.set(extrinsics.len() as u64);
555			metrics.block_constructed.observe(block_took.as_secs_f64());
556			metrics.report_end_proposing_reason(end_reason);
557			metrics.create_block_proposal_time.observe(propose_took.as_secs_f64());
558		});
559
560		let extrinsics_summary = if extrinsics.is_empty() {
561			"no extrinsics".to_string()
562		} else {
563			format!(
564				"extrinsics ({}): [{}]",
565				extrinsics.len(),
566				extrinsics
567					.iter()
568					.map(|xt| BlakeTwo256::hash_of(xt).to_string())
569					.collect::<Vec<_>>()
570					.join(", ")
571			)
572		};
573
574		if log::log_enabled!(log::Level::Info) {
575			info!(
576				"🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; extrinsics_count: {}",
577				block.header().number(),
578				block_took.as_millis(),
579				<Block as BlockT>::Hash::from(block.header().hash()),
580				block.header().parent_hash(),
581				end_reason,
582				extrinsics.len()
583			)
584		} else if log::log_enabled!(log::Level::Trace) {
585			trace!(
586				"🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; {extrinsics_summary}",
587				block.header().number(),
588				block_took.as_millis(),
589				<Block as BlockT>::Hash::from(block.header().hash()),
590				block.header().parent_hash(),
591				end_reason
592			);
593		}
594
595		telemetry!(
596			self.telemetry;
597			CONSENSUS_INFO;
598			"prepared_block_for_proposing";
599			"number" => ?block.header().number(),
600			"hash" => ?<Block as BlockT>::Hash::from(block.header().hash()),
601		);
602	}
603}
604
605#[cfg(test)]
606mod tests {
607	use super::*;
608
609	use futures::executor::block_on;
610	use parking_lot::Mutex;
611	use sc_client_api::{Backend, TrieCacheContext};
612	use sc_transaction_pool::BasicPool;
613	use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource};
614	use sp_api::Core;
615	use sp_blockchain::HeaderBackend;
616	use sp_consensus::{BlockOrigin, Environment, Proposer};
617	use sp_runtime::{generic::BlockId, traits::NumberFor, Perbill};
618	use substrate_test_runtime_client::{
619		prelude::*,
620		runtime::{Block as TestBlock, Extrinsic, ExtrinsicBuilder, Transfer},
621		TestClientBuilder, TestClientBuilderExt,
622	};
623
	// All test transactions are submitted as externally-received.
	const SOURCE: TransactionSource = TransactionSource::External;

	// Note:
	// Maximum normal extrinsic size for `substrate_test_runtime` is ~65% of max_block (refer to
	// `substrate_test_runtime::RuntimeBlockWeights` for details).
	// This extrinsic sizing allows for:
	// - one huge xt + a lot of tiny dust
	// - one huge, no medium,
	// - two medium xts
	// This is widely exploited in following tests.
	const HUGE: u32 = 649000000;
	const MEDIUM: u32 = 250000000;
	const TINY: u32 = 1000;
637
	/// Build a `TINY`-weight `fill_block` extrinsic with the given `nonce`.
	fn extrinsic(nonce: u64) -> Extrinsic {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
	}
641
	/// Wrap a header into a `NewBestBlock` event for transaction pool maintenance.
	fn chain_event<B: BlockT>(header: B::Header) -> ChainEvent<B>
	where
		NumberFor<B>: From<u64>,
	{
		ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }
	}
648
	#[test]
	fn should_cease_building_block_when_deadline_is_reached() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let hashof0 = client.info().genesis_hash;
		block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client.expect_header(hashof0).expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mock clock: the first call returns the base instant unchanged; every later
		// call advances the stored time by 1s (returning the previous value), so the
		// proposer observes the deadline being approached deterministically.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(hashof0).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1
				}
				let old = value.1;
				let new = old + time::Duration::from_secs(1);
				*value = (true, new);
				old
			}),
		);

		// when
		let deadline = time::Duration::from_secs(3);
		let block =
			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
				.map(|r| r.block)
				.unwrap();

		// then
		// block should have some extrinsics although we have some more in the pool.
		assert_eq!(block.extrinsics().len(), 1);
		assert_eq!(txpool.ready().count(), 2);
	}
702
	#[test]
	fn should_not_panic_when_deadline_is_reached() {
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mock clock: after the first call the reported time jumps 160s ahead, i.e.
		// far past the 1s deadline — proposing must still complete without panicking.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(client.info().genesis_hash).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1
				}
				let new = value.1 + time::Duration::from_secs(160);
				*value = (true, new);
				new
			}),
		);

		let deadline = time::Duration::from_secs(1);
		block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
			.map(|r| r.block)
			.unwrap();
	}
738
	#[test]
	fn proposed_storage_changes_should_match_execute_block_storage_changes() {
		let (client, backend) = TestClientBuilder::new().build_with_backend();
		let client = Arc::new(client);
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let genesis_hash = client.info().best_hash;

		block_on(txpool.submit_at(genesis_hash, SOURCE, vec![extrinsic(0)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		let proposer = proposer_factory.init_with_now(
			&client.header(genesis_hash).unwrap().unwrap(),
			Box::new(move || time::Instant::now()),
		);

		let deadline = time::Duration::from_secs(9);
		let proposal =
			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
				.unwrap();

		assert_eq!(proposal.block.extrinsics().len(), 1);

		// Re-execute the proposed block through the runtime API and compare the
		// resulting storage root with the one reported by the proposer.
		let api = client.runtime_api();
		api.execute_block(genesis_hash, proposal.block).unwrap();

		let state = backend.state_at(genesis_hash, TrieCacheContext::Untrusted).unwrap();

		let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap();

		assert_eq!(
			proposal.storage_changes.transaction_storage_root,
			storage_changes.transaction_storage_root,
		);
	}
791
	// This test ensures that if one transaction of a user was rejected, because for example
	// the weight limit was hit, we don't mark the other transactions of the user as invalid because
	// the nonce is not matching.
	#[test]
	fn should_not_remove_invalid_transactions_from_the_same_sender_after_one_was_invalid() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		// Helpers building `fill_block` extrinsics of two different weights (see the
		// sizing note on `HUGE`/`MEDIUM` above).
		let medium = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(MEDIUM))
				.nonce(nonce)
				.build()
		};
		let huge = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)).nonce(nonce).build()
		};

		block_on(txpool.submit_at(
			client.info().genesis_hash,
			SOURCE,
			vec![medium(0), medium(1), huge(2), medium(3), huge(4), medium(5), medium(6)],
		))
		.unwrap();

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
		// Proposes a block on top of `parent_number` and asserts both the number of
		// extrinsics that made it into the block and the number left in the pool.
		let mut propose_block = |client: &TestClient,
		                         parent_number,
		                         expected_block_extrinsics,
		                         expected_pool_transactions| {
			let hash = client.expect_block_hash_from_id(&BlockId::Number(parent_number)).unwrap();
			let proposer = proposer_factory.init_with_now(
				&client.expect_header(hash).unwrap(),
				Box::new(move || time::Instant::now()),
			);

			// when
			let deadline = time::Duration::from_secs(900);
			let block =
				block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
					.map(|r| r.block)
					.unwrap();

			// then
			// block should have some extrinsics although we have some more in the pool.
			assert_eq!(
				txpool.ready().count(),
				expected_pool_transactions,
				"at block: {}",
				block.header.number
			);
			assert_eq!(
				block.extrinsics().len(),
				expected_block_extrinsics,
				"at block: {}",
				block.header.number
			);

			block
		};

		// Imports a proposed block and lets the pool prune included transactions.
		let import_and_maintain = |client: Arc<TestClient>, block: TestBlock| {
			let hash = block.hash();
			block_on(client.import(BlockOrigin::Own, block)).unwrap();
			block_on(txpool.maintain(chain_event(
				client.expect_header(hash).expect("there should be header"),
			)));
		};

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);
		assert_eq!(txpool.ready().count(), 7);

		// let's create one block and import it
		let block = propose_block(&client, 0, 2, 7);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 5);

		// now let's make sure that we can still make some progress
		let block = propose_block(&client, 1, 1, 5);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 4);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 2, 1, 4);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 3);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 3, 1, 3);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 2);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 4, 2, 2);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 0);
	}
903
#[test]
fn should_cease_building_block_when_block_limit_is_reached() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let spawner = sp_core::testing::TaskExecutor::new();
	let txpool = Arc::from(BasicPool::new_full(
		Default::default(),
		true.into(),
		None,
		spawner.clone(),
		client.clone(),
	));
	let genesis_hash = client.info().genesis_hash;
	let genesis_header = client.expect_header(genesis_hash).expect("there should be header");

	// One signed transfer followed by plain test extrinsics.
	let num_extrinsics = 5;
	let first_tx = Transfer {
		from: Sr25519Keyring::Alice.into(),
		to: Sr25519Keyring::Bob.into(),
		amount: 100,
		nonce: 0,
	}
	.into_unchecked_extrinsic();
	let mut all_txs = vec![first_tx];
	all_txs.extend((1..num_extrinsics as u64).map(extrinsic));

	// A limit that covers the header, the empty extrinsics-vec encoding overhead and all
	// but the last transaction.
	let block_limit = all_txs[..num_extrinsics - 1].iter().fold(
		genesis_header.encoded_size() + Vec::<Extrinsic>::new().encoded_size(),
		|size, tx| size + tx.encoded_size(),
	);

	block_on(txpool.submit_at(genesis_hash, SOURCE, all_txs.clone())).unwrap();

	block_on(txpool.maintain(chain_event(genesis_header.clone())));

	let mut proposer_factory =
		ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

	let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

	// Give it enough time
	let deadline = time::Duration::from_secs(300);
	let block = block_on(proposer.propose(
		Default::default(),
		Default::default(),
		deadline,
		Some(block_limit),
	))
	.map(|r| r.block)
	.unwrap();

	// Based on the block limit, one transaction shouldn't be included.
	assert_eq!(block.extrinsics().len(), num_extrinsics - 1);

	let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

	let block =
		block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
			.map(|r| r.block)
			.unwrap();

	// Without a block limit we should include all of them
	assert_eq!(block.extrinsics().len(), num_extrinsics);

	let mut proposer_factory = ProposerFactory::with_proof_recording(
		spawner.clone(),
		client.clone(),
		txpool.clone(),
		None,
		None,
	);

	let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

	// Exact block_limit, which includes:
	// 99 (header_size) + 718 (proof@initialize_block) + 246 (one Transfer extrinsic)
	let proof_aware_limit = {
		let builder = BlockBuilderBuilder::new(&*client)
			.on_parent_block(genesis_header.hash())
			.with_parent_block_number(0)
			.enable_proof_recording()
			.build()
			.unwrap();
		builder.estimate_block_size(true) + all_txs[0].encoded_size()
	};
	let block = block_on(proposer.propose(
		Default::default(),
		Default::default(),
		deadline,
		Some(proof_aware_limit),
	))
	.map(|r| r.block)
	.unwrap();

	// The block limit was increased, but we now include the proof in the estimation of the
	// block size and thus, only the `Transfer` will fit into the block. It reads more data
	// than we have reserved in the block limit.
	assert_eq!(block.extrinsics().len(), 1);
}
1007
#[test]
fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() {
	// given
	let client = Arc::new(substrate_test_runtime_client::new());
	let spawner = sp_core::testing::TaskExecutor::new();
	let txpool = Arc::from(BasicPool::new_full(
		Default::default(),
		true.into(),
		None,
		spawner.clone(),
		client.clone(),
	));
	let genesis_hash = client.info().genesis_hash;

	// Transaction that fills only a tiny fraction of the block.
	let tiny = |nonce| {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
	};
	// Transaction that fills a huge fraction of the block; a distinct signer per
	// transaction keeps them all simultaneously ready in the pool.
	let huge = |who| {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
			.signer(Sr25519Keyring::numeric(who))
			.build()
	};

	block_on(
		txpool.submit_at(
			genesis_hash,
			SOURCE,
			// add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources
			// (`Range` is already an `Iterator`, so no `into_iter()` needed)
			(0..MAX_SKIPPED_TRANSACTIONS * 2)
				.map(huge)
				// and some transactions that are okay.
				.chain((0..MAX_SKIPPED_TRANSACTIONS as u64).map(tiny))
				.collect(),
		),
	)
	.unwrap();

	block_on(txpool.maintain(chain_event(
		client.expect_header(genesis_hash).expect("there should be header"),
	)));
	assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3);

	let mut proposer_factory =
		ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

	// Mock clock: every call to `now` advances it by one second, so the proposer sees
	// steadily passing time without the test actually sleeping.
	let cell = Mutex::new(time::Instant::now());
	let proposer = proposer_factory.init_with_now(
		&client.expect_header(genesis_hash).unwrap(),
		Box::new(move || {
			let mut value = cell.lock();
			let old = *value;
			*value = old + time::Duration::from_secs(1);
			old
		}),
	);

	// when
	// give it enough time so that deadline is never triggered.
	let deadline = time::Duration::from_secs(900);
	let block =
		block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
			.map(|r| r.block)
			.unwrap();

	// then block should have all non-exhaust resources extrinsics (+ the first one).
	assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1);
}
1076
#[test]
fn should_only_skip_up_to_some_limit_after_soft_deadline() {
	// given
	let client = Arc::new(substrate_test_runtime_client::new());
	let spawner = sp_core::testing::TaskExecutor::new();
	let txpool = Arc::from(BasicPool::new_full(
		Default::default(),
		true.into(),
		None,
		spawner.clone(),
		client.clone(),
	));
	let genesis_hash = client.info().genesis_hash;

	// Cheap transaction with nonce 1: for each signer it only becomes includable after
	// that signer's `huge` transaction (which uses the builder's default nonce).
	let tiny = |who| {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY))
			.signer(Sr25519Keyring::numeric(who))
			.nonce(1)
			.build()
	};
	// Resource-exhausting transaction, one per signer.
	let huge = |who| {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
			.signer(Sr25519Keyring::numeric(who))
			.build()
	};

	block_on(
		txpool.submit_at(
			genesis_hash,
			SOURCE,
			// `Range` is already an `Iterator`, so no `into_iter()` needed.
			(0..MAX_SKIPPED_TRANSACTIONS + 2)
				.map(huge)
				// and some transactions that are okay.
				.chain((0..MAX_SKIPPED_TRANSACTIONS + 2).map(tiny))
				.collect(),
		),
	)
	.unwrap();

	block_on(txpool.maintain(chain_event(
		client.expect_header(genesis_hash).expect("there should be header"),
	)));
	assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 4);

	let mut proposer_factory =
		ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

	let deadline = time::Duration::from_secs(600);
	// Mock clock: the second call to `now` jumps past the soft deadline (deadline / 2)
	// but never reaches the hard deadline; `cell` also counts how often `now` is queried
	// so we can verify below which deadline ended block production.
	let cell = Arc::new(Mutex::new((0, time::Instant::now())));
	let cell2 = cell.clone();
	let proposer = proposer_factory.init_with_now(
		&client.expect_header(genesis_hash).unwrap(),
		Box::new(move || {
			let mut value = cell.lock();
			let (called, old) = *value;
			// add time after deadline is calculated internally (hence 1)
			let increase = if called == 1 {
				// we start after the soft_deadline should have already been reached.
				deadline / 2
			} else {
				// but we make sure to never reach the actual deadline
				time::Duration::from_millis(0)
			};
			*value = (called + 1, old + increase);
			old
		}),
	);

	let block =
		block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
			.map(|r| r.block)
			.unwrap();

	// then the block should have one or two transactions. This maybe random as they are
	// processed in parallel. The same signer and consecutive nonces for huge and tiny
	// transactions guarantees that max two transactions will get to the block.
	assert!(
		(1..3).contains(&block.extrinsics().len()),
		"Block shall contain one or two extrinsics."
	);
	assert!(
		cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS,
		"Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline"
	);
}
1163}