sc_basic_authorship/
basic_authorship.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! A consensus proposer for "basic" chains which use the primitive inherent-data.
20
21// FIXME #1021 move this into sp-consensus
22
23use codec::Encode;
24use futures::{
25	channel::oneshot,
26	future,
27	future::{Future, FutureExt},
28};
29use log::{debug, error, info, trace, warn};
30use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder};
31use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO};
32use sc_transaction_pool_api::{InPoolTransaction, TransactionPool, TxInvalidityReportMap};
33use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi};
34use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend};
35use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal};
36use sp_core::traits::SpawnNamed;
37use sp_inherents::InherentData;
38use sp_runtime::{
39	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
40	Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion,
41};
42use std::{marker::PhantomData, pin::Pin, sync::Arc, time};
43
44use prometheus_endpoint::Registry as PrometheusRegistry;
45use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics};
46
/// Default block size limit in bytes used by [`Proposer`].
///
/// Can be overwritten by [`ProposerFactory::set_default_block_size_limit`].
///
/// Be aware that there is also an upper packet size on what the networking code
/// will accept. If the block doesn't fit in such a packet, it can not be
/// transferred to other nodes.
pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512;
55
/// Default soft deadline: half of the hard deadline. See
/// [`ProposerFactory::set_soft_deadline`] for what the soft deadline is used for.
const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50);
57
/// Log target used by this module.
// `'static` is implied on const string slices (clippy::redundant_static_lifetimes).
const LOG_TARGET: &str = "basic-authorship";
59
/// [`Proposer`] factory.
pub struct ProposerFactory<A, C, PR> {
	/// Handle used to spawn the block-production task for each proposer.
	spawn_handle: Box<dyn SpawnNamed>,
	/// The client instance.
	client: Arc<C>,
	/// The transaction pool.
	transaction_pool: Arc<A>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// The default block size limit.
	///
	/// If no `block_size_limit` is passed to [`sp_consensus::Proposer::propose`], this block size
	/// limit will be used.
	default_block_size_limit: usize,
	/// Soft deadline percentage of hard deadline.
	///
	/// The value is used to compute soft deadline during block production.
	/// The soft deadline indicates where we should stop attempting to add transactions
	/// to the block, which exhaust resources. After soft deadline is reached,
	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
	/// transactions which exhaust resources, we will conclude that the block is full.
	soft_deadline_percent: Percent,
	/// Optional telemetry handle for reporting prepared blocks.
	telemetry: Option<TelemetryHandle>,
	/// When estimating the block size, should the proof be included?
	include_proof_in_block_size_estimation: bool,
	/// phantom member to pin the `ProofRecording` type.
	_phantom: PhantomData<PR>,
}
88
89impl<A, C, PR> Clone for ProposerFactory<A, C, PR> {
90	fn clone(&self) -> Self {
91		Self {
92			spawn_handle: self.spawn_handle.clone(),
93			client: self.client.clone(),
94			transaction_pool: self.transaction_pool.clone(),
95			metrics: self.metrics.clone(),
96			default_block_size_limit: self.default_block_size_limit,
97			soft_deadline_percent: self.soft_deadline_percent,
98			telemetry: self.telemetry.clone(),
99			include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
100			_phantom: self._phantom,
101		}
102	}
103}
104
105impl<A, C> ProposerFactory<A, C, DisableProofRecording> {
106	/// Create a new proposer factory.
107	///
108	/// Proof recording will be disabled when using proposers built by this instance to build
109	/// blocks.
110	pub fn new(
111		spawn_handle: impl SpawnNamed + 'static,
112		client: Arc<C>,
113		transaction_pool: Arc<A>,
114		prometheus: Option<&PrometheusRegistry>,
115		telemetry: Option<TelemetryHandle>,
116	) -> Self {
117		ProposerFactory {
118			spawn_handle: Box::new(spawn_handle),
119			transaction_pool,
120			metrics: PrometheusMetrics::new(prometheus),
121			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
122			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
123			telemetry,
124			client,
125			include_proof_in_block_size_estimation: false,
126			_phantom: PhantomData,
127		}
128	}
129}
130
131impl<A, C> ProposerFactory<A, C, EnableProofRecording> {
132	/// Create a new proposer factory with proof recording enabled.
133	///
134	/// Each proposer created by this instance will record a proof while building a block.
135	///
136	/// This will also include the proof into the estimation of the block size. This can be disabled
137	/// by calling [`ProposerFactory::disable_proof_in_block_size_estimation`].
138	pub fn with_proof_recording(
139		spawn_handle: impl SpawnNamed + 'static,
140		client: Arc<C>,
141		transaction_pool: Arc<A>,
142		prometheus: Option<&PrometheusRegistry>,
143		telemetry: Option<TelemetryHandle>,
144	) -> Self {
145		ProposerFactory {
146			client,
147			spawn_handle: Box::new(spawn_handle),
148			transaction_pool,
149			metrics: PrometheusMetrics::new(prometheus),
150			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
151			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
152			telemetry,
153			include_proof_in_block_size_estimation: true,
154			_phantom: PhantomData,
155		}
156	}
157
158	/// Disable the proof inclusion when estimating the block size.
159	pub fn disable_proof_in_block_size_estimation(&mut self) {
160		self.include_proof_in_block_size_estimation = false;
161	}
162}
163
164impl<A, C, PR> ProposerFactory<A, C, PR> {
165	/// Set the default block size limit in bytes.
166	///
167	/// The default value for the block size limit is:
168	/// [`DEFAULT_BLOCK_SIZE_LIMIT`].
169	///
170	/// If there is no block size limit passed to [`sp_consensus::Proposer::propose`], this value
171	/// will be used.
172	pub fn set_default_block_size_limit(&mut self, limit: usize) {
173		self.default_block_size_limit = limit;
174	}
175
176	/// Set soft deadline percentage.
177	///
178	/// The value is used to compute soft deadline during block production.
179	/// The soft deadline indicates where we should stop attempting to add transactions
180	/// to the block, which exhaust resources. After soft deadline is reached,
181	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
182	/// transactions which exhaust resources, we will conclude that the block is full.
183	///
184	/// Setting the value too low will significantly limit the amount of transactions
185	/// we try in case they exhaust resources. Setting the value too high can
186	/// potentially open a DoS vector, where many "exhaust resources" transactions
187	/// are being tried with no success, hence block producer ends up creating an empty block.
188	pub fn set_soft_deadline(&mut self, percent: Percent) {
189		self.soft_deadline_percent = percent;
190	}
191}
192
193impl<Block, C, A, PR> ProposerFactory<A, C, PR>
194where
195	A: TransactionPool<Block = Block> + 'static,
196	Block: BlockT,
197	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + Send + Sync + 'static,
198	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
199{
200	fn init_with_now(
201		&mut self,
202		parent_header: &<Block as BlockT>::Header,
203		now: Box<dyn Fn() -> time::Instant + Send + Sync>,
204	) -> Proposer<Block, C, A, PR> {
205		let parent_hash = parent_header.hash();
206
207		info!(
208			"🙌 Starting consensus session on top of parent {:?} (#{})",
209			parent_hash,
210			parent_header.number()
211		);
212
213		let proposer = Proposer::<_, _, _, PR> {
214			spawn_handle: self.spawn_handle.clone(),
215			client: self.client.clone(),
216			parent_hash,
217			parent_number: *parent_header.number(),
218			transaction_pool: self.transaction_pool.clone(),
219			now,
220			metrics: self.metrics.clone(),
221			default_block_size_limit: self.default_block_size_limit,
222			soft_deadline_percent: self.soft_deadline_percent,
223			telemetry: self.telemetry.clone(),
224			_phantom: PhantomData,
225			include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
226		};
227
228		proposer
229	}
230}
231
impl<A, Block, C, PR> sp_consensus::Environment<Block> for ProposerFactory<A, C, PR>
where
	A: TransactionPool<Block = Block> + 'static,
	Block: BlockT,
	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
	PR: ProofRecording,
{
	type CreateProposer = future::Ready<Result<Self::Proposer, Self::Error>>;
	type Proposer = Proposer<Block, C, A, PR>;
	type Error = sp_blockchain::Error;

	/// Create a proposer on top of `parent_header` using the real wall clock.
	/// Proposer creation itself never fails, hence the already-ready future.
	fn init(&mut self, parent_header: &<Block as BlockT>::Header) -> Self::CreateProposer {
		future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now))))
	}
}
248
/// The proposer logic.
pub struct Proposer<Block: BlockT, C, A: TransactionPool, PR> {
	/// Handle used to spawn the actual block-production task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// The client instance.
	client: Arc<C>,
	/// Hash of the block we build on top of.
	parent_hash: Block::Hash,
	/// Number of the parent block.
	parent_number: <<Block as BlockT>::Header as HeaderT>::Number,
	/// Pool the block's transactions are taken from.
	transaction_pool: Arc<A>,
	/// Clock used for deadline handling; injectable so tests can control time.
	now: Box<dyn Fn() -> time::Instant + Send + Sync>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// Block size limit used when `propose` is called without an explicit one.
	default_block_size_limit: usize,
	/// When estimating the block size, should the proof be included?
	include_proof_in_block_size_estimation: bool,
	/// Soft deadline as a percentage of the hard deadline.
	soft_deadline_percent: Percent,
	/// Optional telemetry handle for reporting prepared blocks.
	telemetry: Option<TelemetryHandle>,
	/// phantom member to pin the `ProofRecording` type.
	_phantom: PhantomData<PR>,
}
264
impl<A, Block, C, PR> sp_consensus::Proposer<Block> for Proposer<Block, C, A, PR>
where
	A: TransactionPool<Block = Block> + 'static,
	Block: BlockT,
	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
	PR: ProofRecording,
{
	type Proposal =
		Pin<Box<dyn Future<Output = Result<Proposal<Block, PR::Proof>, Self::Error>> + Send>>;
	type Error = sp_blockchain::Error;
	type ProofRecording = PR;
	type Proof = PR::Proof;

	/// Spawn the actual block production (`propose_with`) on a blocking task
	/// and return a future that resolves once the result is sent back over a
	/// oneshot channel.
	fn propose(
		self,
		inherent_data: InherentData,
		inherent_digests: Digest,
		max_duration: time::Duration,
		block_size_limit: Option<usize>,
	) -> Self::Proposal {
		let (tx, rx) = oneshot::channel();
		// Cloned up front because `self` is moved into the spawned task below.
		let spawn_handle = self.spawn_handle.clone();

		spawn_handle.spawn_blocking(
			"basic-authorship-proposer",
			None,
			Box::pin(async move {
				// leave some time for evaluation and block finalization (10%)
				let deadline = (self.now)() + max_duration - max_duration / 10;
				let res = self
					.propose_with(inherent_data, inherent_digests, deadline, block_size_limit)
					.await;
				// The receiver may have been dropped if the caller gave up;
				// that is not an error for the producing side.
				if tx.send(res).is_err() {
					trace!(
						target: LOG_TARGET,
						"Could not send block production result to proposer!"
					);
				}
			}),
		);

		async move { rx.await? }.boxed()
	}
}
310
/// If the block is full we will attempt to push at most
/// this number of transactions before quitting for real.
/// It allows us to increase block utilization.
///
/// Only consulted after the soft deadline has passed; see
/// [`ProposerFactory::set_soft_deadline`].
const MAX_SKIPPED_TRANSACTIONS: usize = 8;
315
impl<A, Block, C, PR> Proposer<Block, C, A, PR>
where
	A: TransactionPool<Block = Block>,
	Block: BlockT,
	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
	PR: ProofRecording,
{
	/// Build the block: push inherents, then (if the runtime allows it) pool
	/// transactions, and finalize everything into a [`Proposal`].
	async fn propose_with(
		self,
		inherent_data: InherentData,
		inherent_digests: Digest,
		deadline: time::Instant,
		block_size_limit: Option<usize>,
	) -> Result<Proposal<Block, PR::Proof>, sp_blockchain::Error> {
		let block_timer = time::Instant::now();
		let mut block_builder = BlockBuilderBuilder::new(&*self.client)
			.on_parent_block(self.parent_hash)
			.with_parent_block_number(self.parent_number)
			.with_proof_recording(PR::ENABLED)
			.with_inherent_digests(inherent_digests)
			.build()?;

		self.apply_inherents(&mut block_builder, inherent_data)?;

		// The runtime decides whether normal extrinsics may be included in
		// this block, or only inherents.
		let mode = block_builder.extrinsic_inclusion_mode();
		let end_reason = match mode {
			ExtrinsicInclusionMode::AllExtrinsics =>
				self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?,
			ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden,
		};
		let (block, storage_changes, proof) = block_builder.build()?.into_inner();
		let block_took = block_timer.elapsed();

		let proof =
			PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?;

		self.print_summary(&block, end_reason, block_took, block_timer.elapsed());
		Ok(Proposal { block, proof, storage_changes })
	}

	/// Apply all inherents to the block.
	///
	/// Only a failing *mandatory* inherent aborts block production; all other
	/// inherent failures are logged and the inherent is dropped.
	fn apply_inherents(
		&self,
		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
		inherent_data: InherentData,
	) -> Result<(), sp_blockchain::Error> {
		let create_inherents_start = time::Instant::now();
		let inherents = block_builder.create_inherents(inherent_data)?;
		let create_inherents_end = time::Instant::now();

		self.metrics.report(|metrics| {
			metrics.create_inherents_time.observe(
				create_inherents_end
					.saturating_duration_since(create_inherents_start)
					.as_secs_f64(),
			);
		});

		for inherent in inherents {
			match block_builder.push(inherent) {
				// NOTE(review): this arm is checked before `was_mandatory`, so
				// an inherent that exhausts resources is dropped with only a
				// warning even if it is mandatory — confirm this is intended.
				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
					warn!(
						target: LOG_TARGET,
						"⚠️  Dropping non-mandatory inherent from overweight block."
					)
				},
				Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => {
					error!(
						"❌️ Mandatory inherent extrinsic returned error. Block cannot be produced."
					);
					return Err(ApplyExtrinsicFailed(Validity(e)))
				},
				Err(e) => {
					warn!(
						target: LOG_TARGET,
						"❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e
					);
				},
				Ok(_) => {},
			}
		}
		Ok(())
	}

	/// Apply as many extrinsics as possible to the block.
	///
	/// Stops at the hard `deadline`, when the pool runs dry, or when the block
	/// size/weight limit is hit; returns why proposing ended.
	async fn apply_extrinsics(
		&self,
		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
		deadline: time::Instant,
		block_size_limit: Option<usize>,
	) -> Result<EndProposingReason, sp_blockchain::Error> {
		// proceed with transactions
		// We calculate soft deadline used only in case we start skipping transactions.
		let now = (self.now)();
		let left = deadline.saturating_duration_since(now);
		let left_micros: u64 = left.as_micros().saturated_into();
		// Soft deadline = now + configured percentage of the time left.
		let soft_deadline =
			now + time::Duration::from_micros(self.soft_deadline_percent.mul_floor(left_micros));
		let mut skipped = 0;
		let mut unqueue_invalid = TxInvalidityReportMap::new();

		// Wait at most 1/8 of the remaining time for the pool to become ready.
		let delay = deadline.saturating_duration_since((self.now)()) / 8;
		let mut pending_iterator =
			self.transaction_pool.ready_at_with_timeout(self.parent_hash, delay).await;

		let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit);

		debug!(target: LOG_TARGET, "Attempting to push transactions from the pool at {:?}.", self.parent_hash);
		let mut transaction_pushed = false;

		let end_reason = loop {
			let pending_tx = if let Some(pending_tx) = pending_iterator.next() {
				pending_tx
			} else {
				debug!(
					target: LOG_TARGET,
					"No more transactions, proceeding with proposing."
				);

				break EndProposingReason::NoMoreTransactions
			};

			let now = (self.now)();
			if now > deadline {
				debug!(
					target: LOG_TARGET,
					"Consensus deadline reached when pushing block transactions, \
				proceeding with proposing."
				);
				break EndProposingReason::HitDeadline
			}

			let pending_tx_data = (**pending_tx.data()).clone();
			let pending_tx_hash = pending_tx.hash().clone();

			// Check the size limit before attempting to push the transaction.
			let block_size =
				block_builder.estimate_block_size(self.include_proof_in_block_size_estimation);
			if block_size + pending_tx_data.encoded_size() > block_size_limit {
				pending_iterator.report_invalid(&pending_tx);
				if skipped < MAX_SKIPPED_TRANSACTIONS {
					skipped += 1;
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
					 but will try {} more transactions before quitting.",
						MAX_SKIPPED_TRANSACTIONS - skipped,
					);
					continue
				} else if now < soft_deadline {
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
					 but we still have time before the soft deadline, so \
					 we will try a bit more."
					);
					continue
				} else {
					debug!(
						target: LOG_TARGET,
						"Reached block size limit, proceeding with proposing."
					);
					break EndProposingReason::HitBlockSizeLimit
				}
			}

			trace!(target: LOG_TARGET, "[{:?}] Pushing to the block.", pending_tx_hash);
			match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) {
				Ok(()) => {
					transaction_pushed = true;
					trace!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash);
				},
				// A transaction that exhausts resources is skipped, not
				// reported as invalid to the pool map below: it may still fit
				// into a later, emptier block.
				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
					pending_iterator.report_invalid(&pending_tx);
					if skipped < MAX_SKIPPED_TRANSACTIONS {
						skipped += 1;
						debug!(target: LOG_TARGET,
							"Block seems full, but will try {} more transactions before quitting.",
							MAX_SKIPPED_TRANSACTIONS - skipped,
						);
					} else if (self.now)() < soft_deadline {
						debug!(target: LOG_TARGET,
							"Block seems full, but we still have time before the soft deadline, \
							 so we will try a bit more before quitting."
						);
					} else {
						debug!(
							target: LOG_TARGET,
							"Reached block weight limit, proceeding with proposing."
						);
						break EndProposingReason::HitBlockWeightLimit
					}
				},
				// Any other error: remember the transaction so it can be
				// reported back to the pool after the loop.
				Err(e) => {
					pending_iterator.report_invalid(&pending_tx);
					debug!(
						target: LOG_TARGET,
						"[{:?}] Invalid transaction: {} at: {}", pending_tx_hash, e, self.parent_hash
					);

					let error_to_report = match e {
						ApplyExtrinsicFailed(Validity(e)) => Some(e),
						_ => None,
					};

					unqueue_invalid.insert(pending_tx_hash, error_to_report);
				},
			}
		};

		if matches!(end_reason, EndProposingReason::HitBlockSizeLimit) && !transaction_pushed {
			warn!(
				target: LOG_TARGET,
				"Hit block size limit of `{}` without including any transaction!", block_size_limit,
			);
		}

		self.transaction_pool.report_invalid(Some(self.parent_hash), unqueue_invalid);
		Ok(end_reason)
	}

	/// Prints a summary and does telemetry + metrics.
	///
	/// - `block`: The block that was built.
	/// - `end_reason`: Why did we stop producing the block?
	/// - `block_took`: How long did it take to produce the actual block?
	/// - `propose_took`: How long did the entire proposing take?
	fn print_summary(
		&self,
		block: &Block,
		end_reason: EndProposingReason,
		block_took: time::Duration,
		propose_took: time::Duration,
	) {
		let extrinsics = block.extrinsics();
		self.metrics.report(|metrics| {
			metrics.number_of_transactions.set(extrinsics.len() as u64);
			metrics.block_constructed.observe(block_took.as_secs_f64());
			metrics.report_end_proposing_reason(end_reason);
			metrics.create_block_proposal_time.observe(propose_took.as_secs_f64());
		});

		// Per-extrinsic hash listing; only used by the trace branch below.
		let extrinsics_summary = if extrinsics.is_empty() {
			"no extrinsics".to_string()
		} else {
			format!(
				"extrinsics ({}): [{}]",
				extrinsics.len(),
				extrinsics
					.iter()
					.map(|xt| BlakeTwo256::hash_of(xt).to_string())
					.collect::<Vec<_>>()
					.join(", ")
			)
		};

		// NOTE(review): with the `log` crate, enabling `Trace` also enables
		// `Info`, so the `else if` trace branch appears unreachable and
		// `extrinsics_summary` is then computed but never printed — confirm
		// whether the branches were meant the other way around.
		if log::log_enabled!(log::Level::Info) {
			info!(
				"🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; extrinsics_count: {}",
				block.header().number(),
				block_took.as_millis(),
				<Block as BlockT>::Hash::from(block.header().hash()),
				block.header().parent_hash(),
				end_reason,
				extrinsics.len()
			)
		} else if log::log_enabled!(log::Level::Trace) {
			trace!(
				"🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; {extrinsics_summary}",
				block.header().number(),
				block_took.as_millis(),
				<Block as BlockT>::Hash::from(block.header().hash()),
				block.header().parent_hash(),
				end_reason
			);
		}

		telemetry!(
			self.telemetry;
			CONSENSUS_INFO;
			"prepared_block_for_proposing";
			"number" => ?block.header().number(),
			"hash" => ?<Block as BlockT>::Hash::from(block.header().hash()),
		);
	}
}
602
603#[cfg(test)]
604mod tests {
605	use super::*;
606
607	use futures::executor::block_on;
608	use parking_lot::Mutex;
609	use sc_client_api::{Backend, TrieCacheContext};
610	use sc_transaction_pool::BasicPool;
611	use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource};
612	use sp_api::Core;
613	use sp_blockchain::HeaderBackend;
614	use sp_consensus::{BlockOrigin, Environment, Proposer};
615	use sp_runtime::{generic::BlockId, traits::NumberFor, Perbill};
616	use substrate_test_runtime_client::{
617		prelude::*,
618		runtime::{Block as TestBlock, Extrinsic, ExtrinsicBuilder, Transfer},
619		TestClientBuilder, TestClientBuilderExt,
620	};
621
	// Transaction source used for all pool submissions in these tests.
	const SOURCE: TransactionSource = TransactionSource::External;

	// Note:
	// Maximum normal extrinsic size for `substrate_test_runtime` is ~65% of max_block (refer to
	// `substrate_test_runtime::RuntimeBlockWeights` for details).
	// This extrinsic sizing allows for:
	// - one huge xts + a lot of tiny dust
	// - one huge, no medium,
	// - two medium xts
	// This is widely exploited in following tests.
	const HUGE: u32 = 649000000;
	const MEDIUM: u32 = 250000000;
	const TINY: u32 = 1000;

	// Build a tiny `fill_block` extrinsic with the given nonce.
	fn extrinsic(nonce: u64) -> Extrinsic {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
	}

	// Wrap a header into the `NewBestBlock` event used to maintain the pool.
	fn chain_event<B: BlockT>(header: B::Header) -> ChainEvent<B>
	where
		NumberFor<B>: From<u64>,
	{
		ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }
	}
646
	#[test]
	fn should_cease_building_block_when_deadline_is_reached() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let hashof0 = client.info().genesis_hash;
		block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client.expect_header(hashof0).expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mock clock: the first call returns the base instant unchanged; every
		// later call returns the stored instant and advances it by one second,
		// so the 3s deadline is hit after a few transactions.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(hashof0).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1
				}
				let old = value.1;
				let new = old + time::Duration::from_secs(1);
				*value = (true, new);
				old
			}),
		);

		// when
		let deadline = time::Duration::from_secs(3);
		let block =
			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
				.map(|r| r.block)
				.unwrap();

		// then
		// block should have some extrinsics although we have some more in the pool.
		assert_eq!(block.extrinsics().len(), 1);
		assert_eq!(txpool.ready().count(), 2);
	}
700
	#[test]
	fn should_not_panic_when_deadline_is_reached() {
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mock clock that jumps 160s on the second call, i.e. far past the 1s
		// deadline; proposing must still complete without panicking.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(client.info().genesis_hash).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1
				}
				let new = value.1 + time::Duration::from_secs(160);
				*value = (true, new);
				new
			}),
		);

		let deadline = time::Duration::from_secs(1);
		block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
			.map(|r| r.block)
			.unwrap();
	}
736
	#[test]
	fn proposed_storage_changes_should_match_execute_block_storage_changes() {
		let (client, backend) = TestClientBuilder::new().build_with_backend();
		let client = Arc::new(client);
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let genesis_hash = client.info().best_hash;

		block_on(txpool.submit_at(genesis_hash, SOURCE, vec![extrinsic(0)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		let proposer = proposer_factory.init_with_now(
			&client.header(genesis_hash).unwrap().unwrap(),
			Box::new(move || time::Instant::now()),
		);

		let deadline = time::Duration::from_secs(9);
		let proposal =
			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
				.unwrap();

		assert_eq!(proposal.block.extrinsics().len(), 1);

		// Re-execute the proposed block through the runtime API and check the
		// resulting storage root matches what the proposer reported.
		let api = client.runtime_api();
		api.execute_block(genesis_hash, proposal.block).unwrap();

		let state = backend.state_at(genesis_hash, TrieCacheContext::Untrusted).unwrap();

		let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap();

		assert_eq!(
			proposal.storage_changes.transaction_storage_root,
			storage_changes.transaction_storage_root,
		);
	}
789
	// This test ensures that if one transaction of a user was rejected, because for example
	// the weight limit was hit, we don't mark the other transactions of the user as invalid because
	// the nonce is not matching.
	#[test]
	fn should_not_remove_invalid_transactions_from_the_same_sender_after_one_was_invalid() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let medium = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(MEDIUM))
				.nonce(nonce)
				.build()
		};
		let huge = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)).nonce(nonce).build()
		};

		block_on(txpool.submit_at(
			client.info().genesis_hash,
			SOURCE,
			vec![medium(0), medium(1), huge(2), medium(3), huge(4), medium(5), medium(6)],
		))
		.unwrap();

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
		// Proposes a block on top of `parent_number` and asserts how many
		// extrinsics it contains and how many transactions stay in the pool.
		let mut propose_block = |client: &TestClient,
		                         parent_number,
		                         expected_block_extrinsics,
		                         expected_pool_transactions| {
			let hash = client.expect_block_hash_from_id(&BlockId::Number(parent_number)).unwrap();
			let proposer = proposer_factory.init_with_now(
				&client.expect_header(hash).unwrap(),
				Box::new(move || time::Instant::now()),
			);

			// when
			let deadline = time::Duration::from_secs(900);
			let block =
				block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
					.map(|r| r.block)
					.unwrap();

			// then
			// block should have some extrinsics although we have some more in the pool.
			assert_eq!(
				txpool.ready().count(),
				expected_pool_transactions,
				"at block: {}",
				block.header.number
			);
			assert_eq!(
				block.extrinsics().len(),
				expected_block_extrinsics,
				"at block: {}",
				block.header.number
			);

			block
		};

		// Imports the block and lets the pool react to the new best block.
		let import_and_maintain = |client: Arc<TestClient>, block: TestBlock| {
			let hash = block.hash();
			block_on(client.import(BlockOrigin::Own, block)).unwrap();
			block_on(txpool.maintain(chain_event(
				client.expect_header(hash).expect("there should be header"),
			)));
		};

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);
		assert_eq!(txpool.ready().count(), 7);

		// let's create one block and import it
		let block = propose_block(&client, 0, 2, 7);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 5);

		// now let's make sure that we can still make some progress
		let block = propose_block(&client, 1, 1, 5);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 4);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 2, 1, 4);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 3);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 3, 1, 3);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 2);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 4, 2, 2);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 0);
	}
901
902	#[test]
903	fn should_cease_building_block_when_block_limit_is_reached() {
904		let client = Arc::new(substrate_test_runtime_client::new());
905		let spawner = sp_core::testing::TaskExecutor::new();
906		let txpool = Arc::from(BasicPool::new_full(
907			Default::default(),
908			true.into(),
909			None,
910			spawner.clone(),
911			client.clone(),
912		));
913		let genesis_hash = client.info().genesis_hash;
914		let genesis_header = client.expect_header(genesis_hash).expect("there should be header");
915
916		let extrinsics_num = 5;
917		let extrinsics = std::iter::once(
918			Transfer {
919				from: Sr25519Keyring::Alice.into(),
920				to: Sr25519Keyring::Bob.into(),
921				amount: 100,
922				nonce: 0,
923			}
924			.into_unchecked_extrinsic(),
925		)
926		.chain((1..extrinsics_num as u64).map(extrinsic))
927		.collect::<Vec<_>>();
928
929		let block_limit = genesis_header.encoded_size() +
930			extrinsics
931				.iter()
932				.take(extrinsics_num - 1)
933				.map(Encode::encoded_size)
934				.sum::<usize>() +
935			Vec::<Extrinsic>::new().encoded_size();
936
937		block_on(txpool.submit_at(genesis_hash, SOURCE, extrinsics.clone())).unwrap();
938
939		block_on(txpool.maintain(chain_event(genesis_header.clone())));
940
941		let mut proposer_factory =
942			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
943
944		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
945
946		// Give it enough time
947		let deadline = time::Duration::from_secs(300);
948		let block = block_on(proposer.propose(
949			Default::default(),
950			Default::default(),
951			deadline,
952			Some(block_limit),
953		))
954		.map(|r| r.block)
955		.unwrap();
956
957		// Based on the block limit, one transaction shouldn't be included.
958		assert_eq!(block.extrinsics().len(), extrinsics_num - 1);
959
960		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
961
962		let block =
963			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
964				.map(|r| r.block)
965				.unwrap();
966
967		// Without a block limit we should include all of them
968		assert_eq!(block.extrinsics().len(), extrinsics_num);
969
970		let mut proposer_factory = ProposerFactory::with_proof_recording(
971			spawner.clone(),
972			client.clone(),
973			txpool.clone(),
974			None,
975			None,
976		);
977
978		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
979
980		// Exact block_limit, which includes:
981		// 99 (header_size) + 718 (proof@initialize_block) + 246 (one Transfer extrinsic)
982		let block_limit = {
983			let builder = BlockBuilderBuilder::new(&*client)
984				.on_parent_block(genesis_header.hash())
985				.with_parent_block_number(0)
986				.enable_proof_recording()
987				.build()
988				.unwrap();
989			builder.estimate_block_size(true) + extrinsics[0].encoded_size()
990		};
991		let block = block_on(proposer.propose(
992			Default::default(),
993			Default::default(),
994			deadline,
995			Some(block_limit),
996		))
997		.map(|r| r.block)
998		.unwrap();
999
1000		// The block limit was increased, but we now include the proof in the estimation of the
1001		// block size and thus, only the `Transfer` will fit into the block. It reads more data
1002		// than we have reserved in the block limit.
1003		assert_eq!(block.extrinsics().len(), 1);
1004	}
1005
1006	#[test]
1007	fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() {
1008		// given
1009		let client = Arc::new(substrate_test_runtime_client::new());
1010		let spawner = sp_core::testing::TaskExecutor::new();
1011		let txpool = Arc::from(BasicPool::new_full(
1012			Default::default(),
1013			true.into(),
1014			None,
1015			spawner.clone(),
1016			client.clone(),
1017		));
1018		let genesis_hash = client.info().genesis_hash;
1019
1020		let tiny = |nonce| {
1021			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
1022		};
1023		let huge = |who| {
1024			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
1025				.signer(Sr25519Keyring::numeric(who))
1026				.build()
1027		};
1028
1029		block_on(
1030			txpool.submit_at(
1031				genesis_hash,
1032				SOURCE,
1033				// add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources
1034				(0..MAX_SKIPPED_TRANSACTIONS * 2)
1035					.into_iter()
1036					.map(huge)
1037					// and some transactions that are okay.
1038					.chain((0..MAX_SKIPPED_TRANSACTIONS as u64).into_iter().map(tiny))
1039					.collect(),
1040			),
1041		)
1042		.unwrap();
1043
1044		block_on(txpool.maintain(chain_event(
1045			client.expect_header(genesis_hash).expect("there should be header"),
1046		)));
1047		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3);
1048
1049		let mut proposer_factory =
1050			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
1051
1052		let cell = Mutex::new(time::Instant::now());
1053		let proposer = proposer_factory.init_with_now(
1054			&client.expect_header(genesis_hash).unwrap(),
1055			Box::new(move || {
1056				let mut value = cell.lock();
1057				let old = *value;
1058				*value = old + time::Duration::from_secs(1);
1059				old
1060			}),
1061		);
1062
1063		// when
1064		// give it enough time so that deadline is never triggered.
1065		let deadline = time::Duration::from_secs(900);
1066		let block =
1067			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
1068				.map(|r| r.block)
1069				.unwrap();
1070
1071		// then block should have all non-exhaust resources extrinsics (+ the first one).
1072		assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1);
1073	}
1074
1075	#[test]
1076	fn should_only_skip_up_to_some_limit_after_soft_deadline() {
1077		// given
1078		let client = Arc::new(substrate_test_runtime_client::new());
1079		let spawner = sp_core::testing::TaskExecutor::new();
1080		let txpool = Arc::from(BasicPool::new_full(
1081			Default::default(),
1082			true.into(),
1083			None,
1084			spawner.clone(),
1085			client.clone(),
1086		));
1087		let genesis_hash = client.info().genesis_hash;
1088
1089		let tiny = |who| {
1090			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY))
1091				.signer(Sr25519Keyring::numeric(who))
1092				.nonce(1)
1093				.build()
1094		};
1095		let huge = |who| {
1096			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
1097				.signer(Sr25519Keyring::numeric(who))
1098				.build()
1099		};
1100
1101		block_on(
1102			txpool.submit_at(
1103				genesis_hash,
1104				SOURCE,
1105				(0..MAX_SKIPPED_TRANSACTIONS + 2)
1106					.into_iter()
1107					.map(huge)
1108					// and some transactions that are okay.
1109					.chain((0..MAX_SKIPPED_TRANSACTIONS + 2).into_iter().map(tiny))
1110					.collect(),
1111			),
1112		)
1113		.unwrap();
1114
1115		block_on(txpool.maintain(chain_event(
1116			client.expect_header(genesis_hash).expect("there should be header"),
1117		)));
1118		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 4);
1119
1120		let mut proposer_factory =
1121			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
1122
1123		let deadline = time::Duration::from_secs(600);
1124		let cell = Arc::new(Mutex::new((0, time::Instant::now())));
1125		let cell2 = cell.clone();
1126		let proposer = proposer_factory.init_with_now(
1127			&client.expect_header(genesis_hash).unwrap(),
1128			Box::new(move || {
1129				let mut value = cell.lock();
1130				let (called, old) = *value;
1131				// add time after deadline is calculated internally (hence 1)
1132				let increase = if called == 1 {
1133					// we start after the soft_deadline should have already been reached.
1134					deadline / 2
1135				} else {
1136					// but we make sure to never reach the actual deadline
1137					time::Duration::from_millis(0)
1138				};
1139				*value = (called + 1, old + increase);
1140				old
1141			}),
1142		);
1143
1144		let block =
1145			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
1146				.map(|r| r.block)
1147				.unwrap();
1148
1149		// then the block should have one or two transactions. This maybe random as they are
1150		// processed in parallel. The same signer and consecutive nonces for huge and tiny
1151		// transactions guarantees that max two transactions will get to the block.
1152		assert!(
1153			(1..3).contains(&block.extrinsics().len()),
1154			"Block shall contain one or two extrinsics."
1155		);
1156		assert!(
1157			cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS,
1158			"Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline"
1159		);
1160	}
1161}