Skip to main content

sc_basic_authorship/
basic_authorship.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! A consensus proposer for "basic" chains which use the primitive inherent-data.
20
21// FIXME #1021 move this into sp-consensus
22
23use codec::Encode;
24use futures::{
25	channel::oneshot,
26	future,
27	future::{Future, FutureExt},
28};
29use log::{debug, error, info, log_enabled, trace, warn, Level};
30use prometheus_endpoint::Registry as PrometheusRegistry;
31use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder};
32use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics};
33use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO};
34use sc_transaction_pool_api::{InPoolTransaction, TransactionPool, TxInvalidityReportMap};
35use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi};
36use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend};
37use sp_consensus::{Proposal, ProposeArgs};
38use sp_core::traits::SpawnNamed;
39use sp_inherents::InherentData;
40use sp_runtime::{
41	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
42	ExtrinsicInclusionMode, Percent, SaturatedConversion,
43};
44use std::{pin::Pin, sync::Arc, time};
45
46/// Default block size limit in bytes used by [`Proposer`].
47///
48/// Can be overwritten by [`ProposerFactory::set_default_block_size_limit`].
49///
50/// Be aware that there is also an upper packet size on what the networking code
51/// will accept. If the block doesn't fit in such a package, it can not be
52/// transferred to other nodes.
53pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512;
54
55const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50);
56
57const LOG_TARGET: &'static str = "basic-authorship";
58
/// [`Proposer`] factory.
pub struct ProposerFactory<A, C> {
	/// Handle used to spawn the blocking block-production task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// The client instance.
	client: Arc<C>,
	/// The transaction pool.
	transaction_pool: Arc<A>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// The default block size limit.
	///
	/// If no `block_size_limit` is passed to [`sp_consensus::Proposer::propose`], this block size
	/// limit will be used.
	default_block_size_limit: usize,
	/// Soft deadline percentage of hard deadline.
	///
	/// The value is used to compute soft deadline during block production.
	/// The soft deadline indicates where we should stop attempting to add transactions
	/// to the block, which exhaust resources. After soft deadline is reached,
	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
	/// transactions which exhaust resources, we will conclude that the block is full.
	soft_deadline_percent: Percent,
	/// Optional telemetry handle used to report prepared blocks.
	telemetry: Option<TelemetryHandle>,
}
83
84impl<A, C> Clone for ProposerFactory<A, C> {
85	fn clone(&self) -> Self {
86		Self {
87			spawn_handle: self.spawn_handle.clone(),
88			client: self.client.clone(),
89			transaction_pool: self.transaction_pool.clone(),
90			metrics: self.metrics.clone(),
91			default_block_size_limit: self.default_block_size_limit,
92			soft_deadline_percent: self.soft_deadline_percent,
93			telemetry: self.telemetry.clone(),
94		}
95	}
96}
97
98impl<A, C> ProposerFactory<A, C> {
99	/// Create a new proposer factory.
100	pub fn new(
101		spawn_handle: impl SpawnNamed + 'static,
102		client: Arc<C>,
103		transaction_pool: Arc<A>,
104		prometheus: Option<&PrometheusRegistry>,
105		telemetry: Option<TelemetryHandle>,
106	) -> Self {
107		ProposerFactory {
108			spawn_handle: Box::new(spawn_handle),
109			transaction_pool,
110			metrics: PrometheusMetrics::new(prometheus),
111			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
112			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
113			telemetry,
114			client,
115		}
116	}
117
118	/// Deprecated, use [`Self::new`] instead.
119	#[deprecated(note = "Proof recording is now handled differently. Use `new` instead.")]
120	pub fn with_proof_recording(
121		spawn_handle: impl SpawnNamed + 'static,
122		client: Arc<C>,
123		transaction_pool: Arc<A>,
124		prometheus: Option<&PrometheusRegistry>,
125		telemetry: Option<TelemetryHandle>,
126	) -> Self {
127		Self::new(spawn_handle, client, transaction_pool, prometheus, telemetry)
128	}
129
130	/// Set the default block size limit in bytes.
131	///
132	/// The default value for the block size limit is:
133	/// [`DEFAULT_BLOCK_SIZE_LIMIT`].
134	///
135	/// If there is no block size limit passed to [`sp_consensus::Proposer::propose`], this value
136	/// will be used.
137	pub fn set_default_block_size_limit(&mut self, limit: usize) {
138		self.default_block_size_limit = limit;
139	}
140
141	/// Set soft deadline percentage.
142	///
143	/// The value is used to compute soft deadline during block production.
144	/// The soft deadline indicates where we should stop attempting to add transactions
145	/// to the block, which exhaust resources. After soft deadline is reached,
146	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
147	/// transactions which exhaust resources, we will conclude that the block is full.
148	///
149	/// Setting the value too low will significantly limit the amount of transactions
150	/// we try in case they exhaust resources. Setting the value too high can
151	/// potentially open a DoS vector, where many "exhaust resources" transactions
152	/// are being tried with no success, hence block producer ends up creating an empty block.
153	pub fn set_soft_deadline(&mut self, percent: Percent) {
154		self.soft_deadline_percent = percent;
155	}
156}
157
158impl<Block, C, A> ProposerFactory<A, C>
159where
160	A: TransactionPool<Block = Block> + 'static,
161	Block: BlockT,
162	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + Send + Sync + 'static,
163	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
164{
165	fn init_with_now(
166		&mut self,
167		parent_header: &<Block as BlockT>::Header,
168		now: Box<dyn Fn() -> time::Instant + Send + Sync>,
169	) -> Proposer<Block, C, A> {
170		let parent_hash = parent_header.hash();
171
172		info!(
173			"🙌 Starting consensus session on top of parent {:?} (#{})",
174			parent_hash,
175			parent_header.number()
176		);
177
178		let proposer = Proposer::<_, _, _> {
179			spawn_handle: self.spawn_handle.clone(),
180			client: self.client.clone(),
181			parent_hash,
182			parent_number: *parent_header.number(),
183			transaction_pool: self.transaction_pool.clone(),
184			now,
185			metrics: self.metrics.clone(),
186			default_block_size_limit: self.default_block_size_limit,
187			soft_deadline_percent: self.soft_deadline_percent,
188			telemetry: self.telemetry.clone(),
189		};
190
191		proposer
192	}
193}
194
195impl<A, Block, C> sp_consensus::Environment<Block> for ProposerFactory<A, C>
196where
197	A: TransactionPool<Block = Block> + 'static,
198	Block: BlockT,
199	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
200	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
201{
202	type CreateProposer = future::Ready<Result<Self::Proposer, Self::Error>>;
203	type Proposer = Proposer<Block, C, A>;
204	type Error = sp_blockchain::Error;
205
206	fn init(&mut self, parent_header: &<Block as BlockT>::Header) -> Self::CreateProposer {
207		future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now))))
208	}
209}
210
/// The proposer logic.
pub struct Proposer<Block: BlockT, C, A: TransactionPool> {
	/// Handle used to run block production on a blocking task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// Client used for building blocks.
	client: Arc<C>,
	/// Hash of the parent block the new block is built on top of.
	parent_hash: Block::Hash,
	/// Number of the parent block.
	parent_number: <<Block as BlockT>::Header as HeaderT>::Number,
	/// Pool the block's transactions are drawn from.
	transaction_pool: Arc<A>,
	/// Clock used for deadline calculations; injectable so tests can fake time.
	now: Box<dyn Fn() -> time::Instant + Send + Sync>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// Block size limit used when the caller does not pass one.
	default_block_size_limit: usize,
	/// See [`ProposerFactory::set_soft_deadline`].
	soft_deadline_percent: Percent,
	/// Optional telemetry handle used to report prepared blocks.
	telemetry: Option<TelemetryHandle>,
}
224
225impl<A, Block, C> sp_consensus::Proposer<Block> for Proposer<Block, C, A>
226where
227	A: TransactionPool<Block = Block> + 'static,
228	Block: BlockT,
229	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
230	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
231{
232	type Proposal = Pin<Box<dyn Future<Output = Result<Proposal<Block>, Self::Error>> + Send>>;
233	type Error = sp_blockchain::Error;
234
235	fn propose(self, args: ProposeArgs<Block>) -> Self::Proposal {
236		Self::propose_block(self, args).boxed()
237	}
238}
239
/// If the block is full we will attempt to push at most
/// this number of transactions before quitting for real.
/// It allows us to increase block utilization.
///
/// See [`ProposerFactory::set_soft_deadline`] for how this interacts with the
/// soft deadline while filling a block.
const MAX_SKIPPED_TRANSACTIONS: usize = 8;
244
245impl<A, Block, C> Proposer<Block, C, A>
246where
247	A: TransactionPool<Block = Block> + 'static,
248	Block: BlockT,
249	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
250	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
251{
252	/// Propose a new block.
253	pub async fn propose_block(
254		self,
255		args: ProposeArgs<Block>,
256	) -> Result<Proposal<Block>, sp_blockchain::Error> {
257		let (tx, rx) = oneshot::channel();
258		let spawn_handle = self.spawn_handle.clone();
259
260		// Spawn on a new thread, because block production is a blocking operation.
261		spawn_handle.spawn_blocking(
262			"basic-authorship-proposer",
263			None,
264			async move {
265				let res = self.propose_with(args).await;
266				if tx.send(res).is_err() {
267					trace!(
268						target: LOG_TARGET,
269						"Could not send block production result to proposer!"
270					);
271				}
272			}
273			.boxed(),
274		);
275
276		rx.await?.map_err(Into::into)
277	}
278
	/// Drive the full proposing pipeline: set up the block builder, apply
	/// inherents, then (if the runtime allows) pool transactions, and finally
	/// seal the block.
	async fn propose_with(
		self,
		args: ProposeArgs<Block>,
	) -> Result<Proposal<Block>, sp_blockchain::Error> {
		let ProposeArgs {
			inherent_data,
			inherent_digests,
			max_duration,
			block_size_limit,
			storage_proof_recorder,
			extra_extensions,
		} = args;
		// leave some time for evaluation and block finalization (10%)
		let deadline = (self.now)() + max_duration - max_duration / 10;
		let block_timer = time::Instant::now();

		let mut block_builder = BlockBuilderBuilder::new(&*self.client)
			.on_parent_block(self.parent_hash)
			.with_parent_block_number(self.parent_number)
			.with_proof_recorder(storage_proof_recorder)
			.with_inherent_digests(inherent_digests)
			.with_extra_extensions(extra_extensions)
			.build()?;

		// Inherents are always applied, regardless of the inclusion mode below.
		self.apply_inherents(&mut block_builder, inherent_data)?;

		// The runtime decides per block whether plain extrinsics may be included.
		let mode = block_builder.extrinsic_inclusion_mode();
		let end_reason = match mode {
			ExtrinsicInclusionMode::AllExtrinsics => {
				self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?
			},
			ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden,
		};
		let (block, storage_changes) = block_builder.build()?.into_inner();
		let block_took = block_timer.elapsed();

		self.print_summary(&block, end_reason, block_took, block_timer.elapsed());
		Ok(Proposal { block, storage_changes })
	}
318
319	/// Apply all inherents to the block.
320	fn apply_inherents(
321		&self,
322		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
323		inherent_data: InherentData,
324	) -> Result<(), sp_blockchain::Error> {
325		let create_inherents_start = time::Instant::now();
326
327		let inherent_identifiers = log_enabled!(target: LOG_TARGET, Level::Debug).then(|| {
328			inherent_data
329				.identifiers()
330				.map(|id| String::from_utf8_lossy(id).to_string())
331				.collect::<Vec<String>>()
332		});
333
334		let inherents = block_builder.create_inherents(inherent_data)?;
335		let create_inherents_end = time::Instant::now();
336
337		debug!(target: LOG_TARGET, "apply_inherents: Runtime provided {} inherents. Inherent identifiers present: {:?}", inherents.len(), inherent_identifiers);
338
339		self.metrics.report(|metrics| {
340			metrics.create_inherents_time.observe(
341				create_inherents_end
342					.saturating_duration_since(create_inherents_start)
343					.as_secs_f64(),
344			);
345		});
346
347		for inherent in inherents {
348			match block_builder.push(inherent) {
349				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
350					warn!(
351						target: LOG_TARGET,
352						"⚠️  Dropping non-mandatory inherent from overweight block."
353					)
354				},
355				Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => {
356					error!(
357						"❌️ Mandatory inherent extrinsic returned error. Block cannot be produced."
358					);
359					return Err(ApplyExtrinsicFailed(Validity(e)));
360				},
361				Err(e) => {
362					warn!(
363						target: LOG_TARGET,
364						"❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e
365					);
366				},
367				Ok(_) => {},
368			}
369		}
370		Ok(())
371	}
372
	/// Apply as many extrinsics as possible to the block.
	///
	/// Pulls ready transactions from the pool and pushes them until the hard
	/// deadline, the block size limit, or the block weight limit stops us.
	/// Returns the reason proposing ended; invalid transactions encountered on
	/// the way are reported back to the pool.
	async fn apply_extrinsics(
		&self,
		block_builder: &mut sc_block_builder::BlockBuilder<'_, Block, C>,
		deadline: time::Instant,
		block_size_limit: Option<usize>,
	) -> Result<EndProposingReason, sp_blockchain::Error> {
		// proceed with transactions
		// We calculate soft deadline used only in case we start skipping transactions.
		let now = (self.now)();
		let left = deadline.saturating_duration_since(now);
		let left_micros: u64 = left.as_micros().saturated_into();
		let soft_deadline =
			now + time::Duration::from_micros(self.soft_deadline_percent.mul_floor(left_micros));
		// Number of resource-exhausting transactions skipped so far (capped by
		// `MAX_SKIPPED_TRANSACTIONS` once past the soft deadline).
		let mut skipped = 0;
		// Transactions found invalid during building, reported to the pool at the end.
		let mut unqueue_invalid = TxInvalidityReportMap::new();
		// Remembers a limit we hit while skipping, so that running out of
		// transactions afterwards is still reported as the limit being hit.
		let mut limit_hit_reason: Option<EndProposingReason> = None;

		// Give the pool up to 1/8 of the remaining time to deliver a ready set.
		let delay = deadline.saturating_duration_since((self.now)()) / 8;
		let mut pending_iterator =
			self.transaction_pool.ready_at_with_timeout(self.parent_hash, delay).await;

		let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit);

		debug!(target: LOG_TARGET, "Attempting to push transactions from the pool at {:?}.", self.parent_hash);
		let mut transaction_pushed = false;

		let end_reason = loop {
			let pending_tx = if let Some(pending_tx) = pending_iterator.next() {
				pending_tx
			} else {
				debug!(
					target: LOG_TARGET,
					"No more transactions, proceeding with proposing."
				);

				break limit_hit_reason.unwrap_or(EndProposingReason::NoMoreTransactions);
			};

			let now = (self.now)();
			if now > deadline {
				debug!(
					target: LOG_TARGET,
					"Consensus deadline reached when pushing block transactions, \
				proceeding with proposing."
				);
				break limit_hit_reason.unwrap_or(EndProposingReason::HitDeadline);
			}

			let pending_tx_data = (**pending_tx.data()).clone();
			let pending_tx_hash = pending_tx.hash().clone();

			// Check the size limit before attempting to push, so oversized
			// transactions never reach the runtime.
			let block_size = block_builder.estimate_block_size();
			if block_size + pending_tx_data.encoded_size() > block_size_limit {
				pending_iterator.report_invalid(&pending_tx);
				limit_hit_reason = Some(EndProposingReason::HitBlockSizeLimit);
				if skipped < MAX_SKIPPED_TRANSACTIONS {
					skipped += 1;
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
					 but will try {} more transactions before quitting.",
						MAX_SKIPPED_TRANSACTIONS - skipped,
					);
					continue;
				} else if now < soft_deadline {
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
					 but we still have time before the soft deadline, so \
					 we will try a bit more."
					);
					continue;
				} else {
					debug!(
						target: LOG_TARGET,
						"Reached block size limit, proceeding with proposing."
					);
					break EndProposingReason::HitBlockSizeLimit;
				}
			}

			trace!(target: LOG_TARGET, "[{:?}] Pushing to the block.", pending_tx_hash);
			match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) {
				Ok(()) => {
					transaction_pushed = true;
					// A successful push means the block was not actually full yet.
					limit_hit_reason = None;
					trace!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash);
				},
				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
					pending_iterator.report_invalid(&pending_tx);
					limit_hit_reason = Some(EndProposingReason::HitBlockWeightLimit);
					// Same skip policy as for the size limit above, but for weight.
					if skipped < MAX_SKIPPED_TRANSACTIONS {
						skipped += 1;
						debug!(target: LOG_TARGET,
							"Block seems full, but will try {} more transactions before quitting.",
							MAX_SKIPPED_TRANSACTIONS - skipped,
						);
					} else if (self.now)() < soft_deadline {
						debug!(target: LOG_TARGET,
							"Block seems full, but we still have time before the soft deadline, \
							 so we will try a bit more before quitting."
						);
					} else {
						debug!(
							target: LOG_TARGET,
							"Reached block weight limit, proceeding with proposing."
						);
						break EndProposingReason::HitBlockWeightLimit;
					}
				},
				Err(e) => {
					pending_iterator.report_invalid(&pending_tx);
					debug!(
						target: LOG_TARGET,
						"[{:?}] Invalid transaction: {} at: {}", pending_tx_hash, e, self.parent_hash
					);

					// Only validity errors carry a report for the pool; other
					// errors are recorded without one.
					let error_to_report = match e {
						ApplyExtrinsicFailed(Validity(e)) => Some(e),
						_ => None,
					};

					unqueue_invalid.insert(pending_tx_hash, error_to_report);
				},
			}
		};

		if matches!(end_reason, EndProposingReason::HitBlockSizeLimit) && !transaction_pushed {
			warn!(
				target: LOG_TARGET,
				"Hit block size limit of `{}` without including any transaction!", block_size_limit,
			);
		}

		self.transaction_pool
			.report_invalid(Some(self.parent_hash), unqueue_invalid)
			.await;
		Ok(end_reason)
	}
513
514	/// Prints a summary and does telemetry + metrics.
515	///
516	/// - `block`: The block that was build.
517	/// - `end_reason`: Why did we stop producing the block?
518	/// - `block_took`: How long did it took to produce the actual block?
519	/// - `propose_took`: How long did the entire proposing took?
520	fn print_summary(
521		&self,
522		block: &Block,
523		end_reason: EndProposingReason,
524		block_took: time::Duration,
525		propose_took: time::Duration,
526	) {
527		let extrinsics = block.extrinsics();
528		self.metrics.report(|metrics| {
529			metrics.number_of_transactions.set(extrinsics.len() as u64);
530			metrics.block_constructed.observe(block_took.as_secs_f64());
531			metrics.report_end_proposing_reason(end_reason);
532			metrics.create_block_proposal_time.observe(propose_took.as_secs_f64());
533		});
534
535		let extrinsics_summary = if extrinsics.is_empty() {
536			"no extrinsics".to_string()
537		} else {
538			format!(
539				"extrinsics ({}): [{}]",
540				extrinsics.len(),
541				extrinsics
542					.iter()
543					.map(|xt| BlakeTwo256::hash_of(xt).to_string())
544					.collect::<Vec<_>>()
545					.join(", ")
546			)
547		};
548
549		if log::log_enabled!(log::Level::Info) {
550			info!(
551				"🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; extrinsics_count: {}",
552				block.header().number(),
553				block_took.as_millis(),
554				<Block as BlockT>::Hash::from(block.header().hash()),
555				block.header().parent_hash(),
556				end_reason,
557				extrinsics.len()
558			)
559		} else if log::log_enabled!(log::Level::Trace) {
560			trace!(
561				"🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; {extrinsics_summary}",
562				block.header().number(),
563				block_took.as_millis(),
564				<Block as BlockT>::Hash::from(block.header().hash()),
565				block.header().parent_hash(),
566				end_reason
567			);
568		}
569
570		telemetry!(
571			self.telemetry;
572			CONSENSUS_INFO;
573			"prepared_block_for_proposing";
574			"number" => ?block.header().number(),
575			"hash" => ?<Block as BlockT>::Hash::from(block.header().hash()),
576		);
577	}
578}
579
580#[cfg(test)]
581mod tests {
582	use super::*;
583	use futures::executor::block_on;
584	use parking_lot::Mutex;
585	use sc_client_api::{Backend, TrieCacheContext};
586	use sc_transaction_pool::BasicPool;
587	use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource};
588	use sp_api::Core;
589	use sp_blockchain::HeaderBackend;
590	use sp_consensus::{BlockOrigin, Environment};
591	use sp_runtime::{generic::BlockId, traits::NumberFor, Perbill};
592	use substrate_test_runtime_client::{
593		prelude::*,
594		runtime::{Block as TestBlock, Extrinsic, ExtrinsicBuilder, Transfer},
595		TestClientBuilder, TestClientBuilderExt,
596	};
597
598	const SOURCE: TransactionSource = TransactionSource::External;
599
600	// Note:
601	// Maximum normal extrinsic size for `substrate_test_runtime` is ~65% of max_block (refer to
602	// `substrate_test_runtime::RuntimeBlockWeights` for details).
603	// This extrinsic sizing allows for:
604	// - one huge xts + a lot of tiny dust
605	// - one huge, no medium,
606	// - two medium xts
607	// This is widely exploited in following tests.
608	const HUGE: u32 = 649000000;
609	const MEDIUM: u32 = 250000000;
610	const TINY: u32 = 1000;
611
	/// Build a `fill_block` extrinsic with `TINY` weight and the given nonce.
	fn extrinsic(nonce: u64) -> Extrinsic {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
	}
615
	/// Wrap `header` into a `NewBestBlock` event for driving pool maintenance.
	fn chain_event<B: BlockT>(header: B::Header) -> ChainEvent<B>
	where
		NumberFor<B>: From<u64>,
	{
		ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }
	}
622
	#[test]
	fn should_cease_building_block_when_deadline_is_reached() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		// Two ready transactions in the pool; the deadline should cut off the second.
		let hashof0 = client.info().genesis_hash;
		block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client.expect_header(hashof0).expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Fake clock: the first call returns the base instant unchanged; every later
		// call advances the stored time by one second and returns the previous value,
		// so the proposer observes time passing without real sleeps.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(hashof0).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1;
				}
				let old = value.1;
				let new = old + time::Duration::from_secs(1);
				*value = (true, new);
				old
			}),
		);

		// when
		let deadline = time::Duration::from_secs(3);
		let block = block_on(
			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
		)
		.map(|r| r.block)
		.unwrap();

		// then
		// block should have some extrinsics although we have some more in the pool.
		assert_eq!(block.extrinsics().len(), 1);
		assert_eq!(txpool.ready().count(), 2);
	}
677
	#[test]
	fn should_not_panic_when_deadline_is_reached() {
		// Proposing with a clock that jumps far past the deadline must still
		// produce a (possibly empty) block instead of panicking.
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Fake clock: first call returns the base instant, every later call jumps
		// 160 seconds ahead — far beyond the 1 second deadline below.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(client.info().genesis_hash).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1;
				}
				let new = value.1 + time::Duration::from_secs(160);
				*value = (true, new);
				new
			}),
		);

		let deadline = time::Duration::from_secs(1);
		block_on(
			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
		)
		.map(|r| r.block)
		.unwrap();
	}
715
	#[test]
	fn proposed_storage_changes_should_match_execute_block_storage_changes() {
		// Proposing a block and re-executing the same block through the runtime
		// API must yield the same transaction storage root.
		let (client, backend) = TestClientBuilder::new().build_with_backend();
		let client = Arc::new(client);
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let genesis_hash = client.info().best_hash;

		block_on(txpool.submit_at(genesis_hash, SOURCE, vec![extrinsic(0)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		let proposer = proposer_factory.init_with_now(
			&client.header(genesis_hash).unwrap().unwrap(),
			Box::new(move || time::Instant::now()),
		);

		let deadline = time::Duration::from_secs(9);
		let proposal = block_on(
			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
		)
		.unwrap();

		assert_eq!(proposal.block.extrinsics().len(), 1);

		// Re-execute the proposed block on top of genesis and extract the
		// resulting storage changes.
		let api = client.runtime_api();
		api.execute_block(genesis_hash, proposal.block.into()).unwrap();

		let state = backend.state_at(genesis_hash, TrieCacheContext::Untrusted).unwrap();

		let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap();

		assert_eq!(
			proposal.storage_changes.transaction_storage_root,
			storage_changes.transaction_storage_root,
		);
	}
769
	// This test ensures that if one transaction of a user was rejected, because for example
	// the weight limit was hit, we don't mark the other transactions of the user as invalid because
	// the nonce is not matching.
	#[test]
	fn should_not_remove_invalid_transactions_from_the_same_sender_after_one_was_invalid() {
		// given
		let client = Arc::new(substrate_test_runtime_client::new());
		let spawner = sp_core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		// `fill_block` extrinsics of two weight classes (see the `HUGE`/`MEDIUM`
		// constants above) so that some combination always overflows a block.
		let medium = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(MEDIUM))
				.nonce(nonce)
				.build()
		};
		let huge = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)).nonce(nonce).build()
		};

		block_on(txpool.submit_at(
			client.info().genesis_hash,
			SOURCE,
			vec![medium(0), medium(1), huge(2), medium(3), huge(4), medium(5), medium(6)],
		))
		.unwrap();

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
		// Proposes a block on top of `parent_number` and asserts how many
		// extrinsics end up in the block and how many stay ready in the pool.
		let mut propose_block = |client: &TestClient,
		                         parent_number,
		                         expected_block_extrinsics,
		                         expected_pool_transactions| {
			let hash = client.expect_block_hash_from_id(&BlockId::Number(parent_number)).unwrap();
			let proposer = proposer_factory.init_with_now(
				&client.expect_header(hash).unwrap(),
				Box::new(move || time::Instant::now()),
			);

			// when
			let deadline = time::Duration::from_secs(900);
			let block = block_on(
				proposer
					.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
			)
			.map(|r| r.block)
			.unwrap();

			// then
			// block should have some extrinsics although we have some more in the pool.
			assert_eq!(
				txpool.ready().count(),
				expected_pool_transactions,
				"at block: {}",
				block.header.number
			);
			assert_eq!(
				block.extrinsics().len(),
				expected_block_extrinsics,
				"at block: {}",
				block.header.number
			);

			block
		};

		// Imports the proposed block and lets the pool prune included transactions.
		let import_and_maintain = |client: Arc<TestClient>, block: TestBlock| {
			let hash = block.hash();
			block_on(client.import(BlockOrigin::Own, block)).unwrap();
			block_on(txpool.maintain(chain_event(
				client.expect_header(hash).expect("there should be header"),
			)));
		};

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);
		assert_eq!(txpool.ready().count(), 7);

		// let's create one block and import it
		let block = propose_block(&client, 0, 2, 7);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 5);

		// now let's make sure that we can still make some progress
		let block = propose_block(&client, 1, 1, 5);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 4);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 2, 1, 4);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 3);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 3, 1, 3);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 2);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 4, 2, 2);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 0);
	}
883
884	#[test]
885	fn should_cease_building_block_when_block_limit_is_reached() {
886		let client = Arc::new(substrate_test_runtime_client::new());
887		let spawner = sp_core::testing::TaskExecutor::new();
888		let txpool = Arc::from(BasicPool::new_full(
889			Default::default(),
890			true.into(),
891			None,
892			spawner.clone(),
893			client.clone(),
894		));
895		let genesis_hash = client.info().genesis_hash;
896		let genesis_header = client.expect_header(genesis_hash).expect("there should be header");
897
898		let extrinsics_num = 5;
899		let extrinsics = std::iter::once(
900			Transfer {
901				from: Sr25519Keyring::Alice.into(),
902				to: Sr25519Keyring::Bob.into(),
903				amount: 100,
904				nonce: 0,
905			}
906			.into_unchecked_extrinsic(),
907		)
908		.chain((1..extrinsics_num as u64).map(extrinsic))
909		.collect::<Vec<_>>();
910
911		let block_limit = genesis_header.encoded_size() +
912			extrinsics
913				.iter()
914				.take(extrinsics_num - 1)
915				.map(Encode::encoded_size)
916				.sum::<usize>() +
917			Vec::<Extrinsic>::new().encoded_size();
918
919		block_on(txpool.submit_at(genesis_hash, SOURCE, extrinsics.clone())).unwrap();
920
921		block_on(txpool.maintain(chain_event(genesis_header.clone())));
922
923		let mut proposer_factory =
924			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
925
926		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
927
928		// Give it enough time
929		let deadline = time::Duration::from_secs(300);
930		let block = block_on(proposer.propose_block(ProposeArgs {
931			max_duration: deadline,
932			block_size_limit: Some(block_limit),
933			..Default::default()
934		}))
935		.map(|r| r.block)
936		.unwrap();
937
938		// Based on the block limit, one transaction shouldn't be included.
939		assert_eq!(block.extrinsics().len(), extrinsics_num - 1);
940
941		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
942
943		let block = block_on(
944			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
945		)
946		.map(|r| r.block)
947		.unwrap();
948
949		// Without a block limit we should include all of them
950		assert_eq!(block.extrinsics().len(), extrinsics_num);
951
952		let mut proposer_factory =
953			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
954
955		let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();
956
957		// Exact block_limit, which includes:
958		// 99 (header_size) + 718 (proof@initialize_block) + 246 (one Transfer extrinsic)
959		let block_limit = {
960			let builder = BlockBuilderBuilder::new(&*client)
961				.on_parent_block(genesis_header.hash())
962				.with_parent_block_number(0)
963				.enable_proof_recording()
964				.build()
965				.unwrap();
966			builder.estimate_block_size() + extrinsics[0].encoded_size()
967		};
968		let block = block_on(proposer.propose_block(ProposeArgs {
969			max_duration: deadline,
970			block_size_limit: Some(block_limit),
971			storage_proof_recorder: Some(Default::default()),
972			..Default::default()
973		}))
974		.map(|r| r.block)
975		.unwrap();
976
977		// The block limit was increased, but we now include the proof in the estimation of the
978		// block size and thus, only the `Transfer` will fit into the block. It reads more data
979		// than we have reserved in the block limit.
980		assert_eq!(block.extrinsics().len(), 1);
981	}
982
983	#[test]
984	fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() {
985		// given
986		let client = Arc::new(substrate_test_runtime_client::new());
987		let spawner = sp_core::testing::TaskExecutor::new();
988		let txpool = Arc::from(BasicPool::new_full(
989			Default::default(),
990			true.into(),
991			None,
992			spawner.clone(),
993			client.clone(),
994		));
995		let genesis_hash = client.info().genesis_hash;
996
997		let tiny = |nonce| {
998			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
999		};
1000		let huge = |who| {
1001			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
1002				.signer(Sr25519Keyring::numeric(who))
1003				.build()
1004		};
1005
1006		block_on(
1007			txpool.submit_at(
1008				genesis_hash,
1009				SOURCE,
1010				// add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources
1011				(0..MAX_SKIPPED_TRANSACTIONS * 2)
1012					.into_iter()
1013					.map(huge)
1014					// and some transactions that are okay.
1015					.chain((0..MAX_SKIPPED_TRANSACTIONS as u64).into_iter().map(tiny))
1016					.collect(),
1017			),
1018		)
1019		.unwrap();
1020
1021		block_on(txpool.maintain(chain_event(
1022			client.expect_header(genesis_hash).expect("there should be header"),
1023		)));
1024		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3);
1025
1026		let mut proposer_factory =
1027			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
1028
1029		let cell = Mutex::new(time::Instant::now());
1030		let proposer = proposer_factory.init_with_now(
1031			&client.expect_header(genesis_hash).unwrap(),
1032			Box::new(move || {
1033				let mut value = cell.lock();
1034				let old = *value;
1035				*value = old + time::Duration::from_secs(1);
1036				old
1037			}),
1038		);
1039
1040		// when
1041		// give it enough time so that deadline is never triggered.
1042		let deadline = time::Duration::from_secs(900);
1043		let block = block_on(
1044			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
1045		)
1046		.map(|r| r.block)
1047		.unwrap();
1048
1049		// then block should have all non-exhaust resources extrinsics (+ the first one).
1050		assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1);
1051	}
1052
1053	#[test]
1054	fn should_only_skip_up_to_some_limit_after_soft_deadline() {
1055		// given
1056		let client = Arc::new(substrate_test_runtime_client::new());
1057		let spawner = sp_core::testing::TaskExecutor::new();
1058		let txpool = Arc::from(BasicPool::new_full(
1059			Default::default(),
1060			true.into(),
1061			None,
1062			spawner.clone(),
1063			client.clone(),
1064		));
1065		let genesis_hash = client.info().genesis_hash;
1066
1067		let tiny = |who| {
1068			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY))
1069				.signer(Sr25519Keyring::numeric(who))
1070				.nonce(1)
1071				.build()
1072		};
1073		let huge = |who| {
1074			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
1075				.signer(Sr25519Keyring::numeric(who))
1076				.build()
1077		};
1078
1079		block_on(
1080			txpool.submit_at(
1081				genesis_hash,
1082				SOURCE,
1083				(0..MAX_SKIPPED_TRANSACTIONS + 2)
1084					.into_iter()
1085					.map(huge)
1086					// and some transactions that are okay.
1087					.chain((0..MAX_SKIPPED_TRANSACTIONS + 2).into_iter().map(tiny))
1088					.collect(),
1089			),
1090		)
1091		.unwrap();
1092
1093		block_on(txpool.maintain(chain_event(
1094			client.expect_header(genesis_hash).expect("there should be header"),
1095		)));
1096		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 4);
1097
1098		let mut proposer_factory =
1099			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
1100
1101		let deadline = time::Duration::from_secs(600);
1102		let cell = Arc::new(Mutex::new((0, time::Instant::now())));
1103		let cell2 = cell.clone();
1104		let proposer = proposer_factory.init_with_now(
1105			&client.expect_header(genesis_hash).unwrap(),
1106			Box::new(move || {
1107				let mut value = cell.lock();
1108				let (called, old) = *value;
1109				// add time after deadline is calculated internally (hence 1)
1110				let increase = if called == 1 {
1111					// we start after the soft_deadline should have already been reached.
1112					deadline / 2
1113				} else {
1114					// but we make sure to never reach the actual deadline
1115					time::Duration::from_millis(0)
1116				};
1117				*value = (called + 1, old + increase);
1118				old
1119			}),
1120		);
1121
1122		let block = block_on(
1123			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
1124		)
1125		.map(|r| r.block)
1126		.unwrap();
1127
1128		// then the block should have one or two transactions. This maybe random as they are
1129		// processed in parallel. The same signer and consecutive nonces for huge and tiny
1130		// transactions guarantees that max two transactions will get to the block.
1131		assert!(
1132			(1..3).contains(&block.extrinsics().len()),
1133			"Block shall contain one or two extrinsics."
1134		);
1135		assert!(
1136			cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS,
1137			"Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline"
1138		);
1139	}
1140}