Skip to main content

soil_service/basic_authorship/
basic_authorship.rs

1// This file is part of Soil.
2
3// Copyright (C) Soil contributors.
4// Copyright (C) Parity Technologies (UK) Ltd.
5// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
6
7//! A consensus proposer for "basic" chains which use the primitive inherent-data.
8
9// FIXME #1021 move this into sp-consensus
10
11use crate::proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics};
12use codec::Encode;
13use futures::{
14	channel::oneshot,
15	future,
16	future::{Future, FutureExt},
17};
18use log::{debug, error, info, log_enabled, trace, warn, Level};
19use soil_prometheus::Registry as PrometheusRegistry;
20use soil_client::block_builder::{BlockBuilderApi, BlockBuilderBuilder};
21use soil_client::blockchain::{
22	ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend,
23};
24use soil_client::consensus::{Proposal, ProposeArgs};
25use soil_client::transaction_pool::{InPoolTransaction, TransactionPool, TxInvalidityReportMap};
26use soil_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO};
27use std::{pin::Pin, sync::Arc, time};
28use subsoil::api::{ApiExt, CallApiAt, ProvideRuntimeApi};
29use subsoil::core::traits::SpawnNamed;
30use subsoil::inherents::InherentData;
31use subsoil::runtime::{
32	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
33	ExtrinsicInclusionMode, Percent, SaturatedConversion,
34};
35
/// Default block size limit in bytes used by [`Proposer`].
///
/// Can be overwritten by [`ProposerFactory::set_default_block_size_limit`].
///
/// Be aware that there is also an upper packet size on what the networking code
/// will accept. If the block doesn't fit in such a package, it can not be
/// transferred to other nodes.
// NOTE(review): 4 MiB of payload plus 512 bytes of slack — presumably headroom
// for the block header/envelope; confirm against the networking packet limit.
pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512;
44
/// Default soft deadline as a fraction of the hard deadline; see
/// [`ProposerFactory::set_soft_deadline`] for how it is used.
const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50);
46
/// Log target used by all log macros in this module.
// `'static` is implied for string constants; spelling it out trips clippy's
// `redundant_static_lifetimes` lint.
const LOG_TARGET: &str = "basic-authorship";
48
/// [`Proposer`] factory.
///
/// Implements [`soil_client::consensus::Environment`]: each call to `init`
/// yields a fresh [`Proposer`] building on top of the given parent header.
pub struct ProposerFactory<A, C> {
	/// Handle used to spawn the blocking block-production task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// The client instance.
	client: Arc<C>,
	/// The transaction pool.
	transaction_pool: Arc<A>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// The default block size limit.
	///
	/// If no `block_size_limit` is passed to [`soil_client::consensus::Proposer::propose`], this block size
	/// limit will be used.
	default_block_size_limit: usize,
	/// Soft deadline percentage of hard deadline.
	///
	/// The value is used to compute soft deadline during block production.
	/// The soft deadline indicates where we should stop attempting to add transactions
	/// to the block, which exhaust resources. After soft deadline is reached,
	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
	/// transactions which exhaust resources, we will conclude that the block is full.
	soft_deadline_percent: Percent,
	/// Optional telemetry handle for reporting proposing events.
	telemetry: Option<TelemetryHandle>,
}
73
74impl<A, C> Clone for ProposerFactory<A, C> {
75	fn clone(&self) -> Self {
76		Self {
77			spawn_handle: self.spawn_handle.clone(),
78			client: self.client.clone(),
79			transaction_pool: self.transaction_pool.clone(),
80			metrics: self.metrics.clone(),
81			default_block_size_limit: self.default_block_size_limit,
82			soft_deadline_percent: self.soft_deadline_percent,
83			telemetry: self.telemetry.clone(),
84		}
85	}
86}
87
88impl<A, C> ProposerFactory<A, C> {
89	/// Create a new proposer factory.
90	pub fn new(
91		spawn_handle: impl SpawnNamed + 'static,
92		client: Arc<C>,
93		transaction_pool: Arc<A>,
94		prometheus: Option<&PrometheusRegistry>,
95		telemetry: Option<TelemetryHandle>,
96	) -> Self {
97		ProposerFactory {
98			spawn_handle: Box::new(spawn_handle),
99			transaction_pool,
100			metrics: PrometheusMetrics::new(prometheus),
101			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
102			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
103			telemetry,
104			client,
105		}
106	}
107
108	/// Deprecated, use [`Self::new`] instead.
109	#[deprecated(note = "Proof recording is now handled differently. Use `new` instead.")]
110	pub fn with_proof_recording(
111		spawn_handle: impl SpawnNamed + 'static,
112		client: Arc<C>,
113		transaction_pool: Arc<A>,
114		prometheus: Option<&PrometheusRegistry>,
115		telemetry: Option<TelemetryHandle>,
116	) -> Self {
117		Self::new(spawn_handle, client, transaction_pool, prometheus, telemetry)
118	}
119
120	/// Set the default block size limit in bytes.
121	///
122	/// The default value for the block size limit is:
123	/// [`DEFAULT_BLOCK_SIZE_LIMIT`].
124	///
125	/// If there is no block size limit passed to [`soil_client::consensus::Proposer::propose`], this value
126	/// will be used.
127	pub fn set_default_block_size_limit(&mut self, limit: usize) {
128		self.default_block_size_limit = limit;
129	}
130
131	/// Set soft deadline percentage.
132	///
133	/// The value is used to compute soft deadline during block production.
134	/// The soft deadline indicates where we should stop attempting to add transactions
135	/// to the block, which exhaust resources. After soft deadline is reached,
136	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
137	/// transactions which exhaust resources, we will conclude that the block is full.
138	///
139	/// Setting the value too low will significantly limit the amount of transactions
140	/// we try in case they exhaust resources. Setting the value too high can
141	/// potentially open a DoS vector, where many "exhaust resources" transactions
142	/// are being tried with no success, hence block producer ends up creating an empty block.
143	pub fn set_soft_deadline(&mut self, percent: Percent) {
144		self.soft_deadline_percent = percent;
145	}
146}
147
148impl<Block, C, A> ProposerFactory<A, C>
149where
150	A: TransactionPool<Block = Block> + 'static,
151	Block: BlockT,
152	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + Send + Sync + 'static,
153	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
154{
155	fn init_with_now(
156		&mut self,
157		parent_header: &<Block as BlockT>::Header,
158		now: Box<dyn Fn() -> time::Instant + Send + Sync>,
159	) -> Proposer<Block, C, A> {
160		let parent_hash = parent_header.hash();
161
162		info!(
163			"🙌 Starting consensus session on top of parent {:?} (#{})",
164			parent_hash,
165			parent_header.number()
166		);
167
168		let proposer = Proposer::<_, _, _> {
169			spawn_handle: self.spawn_handle.clone(),
170			client: self.client.clone(),
171			parent_hash,
172			parent_number: *parent_header.number(),
173			transaction_pool: self.transaction_pool.clone(),
174			now,
175			metrics: self.metrics.clone(),
176			default_block_size_limit: self.default_block_size_limit,
177			soft_deadline_percent: self.soft_deadline_percent,
178			telemetry: self.telemetry.clone(),
179		};
180
181		proposer
182	}
183}
184
impl<A, Block, C> soil_client::consensus::Environment<Block> for ProposerFactory<A, C>
where
	A: TransactionPool<Block = Block> + 'static,
	Block: BlockT,
	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
{
	// Proposer creation cannot fail here, so an already-resolved future suffices.
	type CreateProposer = future::Ready<Result<Self::Proposer, Self::Error>>;
	type Proposer = Proposer<Block, C, A>;
	type Error = soil_client::blockchain::Error;

	// Create a proposer building on top of `parent_header`, using the real wall
	// clock (`Instant::now`) as the time source.
	fn init(&mut self, parent_header: &<Block as BlockT>::Header) -> Self::CreateProposer {
		future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now))))
	}
}
200
/// The proposer logic.
///
/// Created by [`ProposerFactory`]; builds a single block on top of the
/// configured parent and is consumed in the process.
pub struct Proposer<Block: BlockT, C, A: TransactionPool> {
	/// Handle used to spawn the blocking block-production task.
	spawn_handle: Box<dyn SpawnNamed>,
	/// Client used for building blocks.
	client: Arc<C>,
	/// Hash of the block we build on top of.
	parent_hash: Block::Hash,
	/// Number of the parent block.
	parent_number: <<Block as BlockT>::Header as HeaderT>::Number,
	/// Pool supplying the transactions to include.
	transaction_pool: Arc<A>,
	/// Time source; injectable so that tests can simulate the clock.
	now: Box<dyn Fn() -> time::Instant + Send + Sync>,
	/// Prometheus metrics link.
	metrics: PrometheusMetrics,
	/// Block size limit used when `ProposeArgs::block_size_limit` is `None`.
	default_block_size_limit: usize,
	/// See [`ProposerFactory::set_soft_deadline`].
	soft_deadline_percent: Percent,
	/// Optional telemetry handle.
	telemetry: Option<TelemetryHandle>,
}
214
215impl<A, Block, C> soil_client::consensus::Proposer<Block> for Proposer<Block, C, A>
216where
217	A: TransactionPool<Block = Block> + 'static,
218	Block: BlockT,
219	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
220	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
221{
222	type Proposal = Pin<Box<dyn Future<Output = Result<Proposal<Block>, Self::Error>> + Send>>;
223	type Error = soil_client::blockchain::Error;
224
225	fn propose(self, args: ProposeArgs<Block>) -> Self::Proposal {
226		Self::propose_block(self, args).boxed()
227	}
228}
229
/// If the block is full we will attempt to push at most
/// this number of transactions before quitting for real.
/// It allows us to increase block utilization.
const MAX_SKIPPED_TRANSACTIONS: usize = 8;
234
impl<A, Block, C> Proposer<Block, C, A>
where
	A: TransactionPool<Block = Block> + 'static,
	Block: BlockT,
	C: HeaderBackend<Block> + ProvideRuntimeApi<Block> + CallApiAt<Block> + Send + Sync + 'static,
	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
{
	/// Propose a new block.
	///
	/// Off-loads the actual block building (which is CPU-bound and blocking) to a
	/// dedicated blocking task and awaits the result over a oneshot channel.
	pub async fn propose_block(
		self,
		args: ProposeArgs<Block>,
	) -> Result<Proposal<Block>, soil_client::blockchain::Error> {
		let (tx, rx) = oneshot::channel();
		let spawn_handle = self.spawn_handle.clone();

		// Spawn on a new thread, because block production is a blocking operation.
		spawn_handle.spawn_blocking(
			"basic-authorship-proposer",
			None,
			async move {
				let res = self.propose_with(args).await;
				// The send fails only when the receiver was dropped, i.e. the
				// caller stopped waiting; nothing to do but log it.
				if tx.send(res).is_err() {
					trace!(
						target: LOG_TARGET,
						"Could not send block production result to proposer!"
					);
				}
			}
			.boxed(),
		);

		// `rx.await` errors only if the sender was dropped (spawned task died);
		// that cancellation error converts into `Self::Error` via `?`.
		rx.await?.map_err(Into::into)
	}

	/// Actually build the block: create and apply inherents first, then — if the
	/// runtime allows it — fill the remaining space with pool transactions.
	async fn propose_with(
		self,
		args: ProposeArgs<Block>,
	) -> Result<Proposal<Block>, soil_client::blockchain::Error> {
		let ProposeArgs {
			inherent_data,
			inherent_digests,
			max_duration,
			block_size_limit,
			storage_proof_recorder,
			extra_extensions,
		} = args;
		// leave some time for evaluation and block finalization (10%)
		let deadline = (self.now)() + max_duration - max_duration / 10;
		let block_timer = time::Instant::now();

		let mut block_builder = BlockBuilderBuilder::new(&*self.client)
			.on_parent_block(self.parent_hash)
			.with_parent_block_number(self.parent_number)
			.with_proof_recorder(storage_proof_recorder)
			.with_inherent_digests(inherent_digests)
			.with_extra_extensions(extra_extensions)
			.build()?;

		self.apply_inherents(&mut block_builder, inherent_data)?;

		// The runtime decides (per block) whether pool transactions may be
		// included or the block is inherents-only.
		let mode = block_builder.extrinsic_inclusion_mode();
		let end_reason = match mode {
			ExtrinsicInclusionMode::AllExtrinsics => {
				self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?
			},
			ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden,
		};
		let (block, storage_changes) = block_builder.build()?.into_inner();
		let block_took = block_timer.elapsed();

		// NOTE(review): `propose_took` is read from the same `block_timer`, so it
		// only differs from `block_took` by the time spent in `build()`'s return —
		// confirm whether a separate proposing timer was intended.
		self.print_summary(&block, end_reason, block_took, block_timer.elapsed());
		Ok(Proposal { block, storage_changes })
	}

	/// Apply all inherents to the block.
	///
	/// Non-mandatory inherents that fail (overweight or otherwise) are dropped
	/// with a warning; a failing *mandatory* inherent aborts block production.
	fn apply_inherents(
		&self,
		block_builder: &mut soil_client::block_builder::BlockBuilder<'_, Block, C>,
		inherent_data: InherentData,
	) -> Result<(), soil_client::blockchain::Error> {
		let create_inherents_start = time::Instant::now();

		// Collect the identifiers only when debug logging is on — the lossy
		// UTF-8 conversion and allocation are otherwise wasted.
		let inherent_identifiers = log_enabled!(target: LOG_TARGET, Level::Debug).then(|| {
			inherent_data
				.identifiers()
				.map(|id| String::from_utf8_lossy(id).to_string())
				.collect::<Vec<String>>()
		});

		let inherents = block_builder.create_inherents(inherent_data)?;
		let create_inherents_end = time::Instant::now();

		debug!(target: LOG_TARGET, "apply_inherents: Runtime provided {} inherents. Inherent identifiers present: {:?}", inherents.len(), inherent_identifiers);

		self.metrics.report(|metrics| {
			metrics.create_inherents_time.observe(
				create_inherents_end
					.saturating_duration_since(create_inherents_start)
					.as_secs_f64(),
			);
		});

		for inherent in inherents {
			match block_builder.push(inherent) {
				// Note the arm order: an inherent that is both mandatory and
				// exhausting resources hits this arm first and is dropped.
				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
					warn!(
						target: LOG_TARGET,
						"⚠️  Dropping non-mandatory inherent from overweight block."
					)
				},
				Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => {
					error!(
						"❌️ Mandatory inherent extrinsic returned error. Block cannot be produced."
					);
					return Err(ApplyExtrinsicFailed(Validity(e)));
				},
				Err(e) => {
					warn!(
						target: LOG_TARGET,
						"❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e
					);
				},
				Ok(_) => {},
			}
		}
		Ok(())
	}

	/// Apply as many extrinsics as possible to the block.
	///
	/// Stops on: pool exhausted, hard `deadline` hit, block size limit hit, or
	/// block weight limit hit — after the soft deadline, at most
	/// `MAX_SKIPPED_TRANSACTIONS` resource-exhausting transactions are retried
	/// before the block is declared full.
	async fn apply_extrinsics(
		&self,
		block_builder: &mut soil_client::block_builder::BlockBuilder<'_, Block, C>,
		deadline: time::Instant,
		block_size_limit: Option<usize>,
	) -> Result<EndProposingReason, soil_client::blockchain::Error> {
		// proceed with transactions
		// We calculate soft deadline used only in case we start skipping transactions.
		let now = (self.now)();
		let left = deadline.saturating_duration_since(now);
		let left_micros: u64 = left.as_micros().saturated_into();
		let soft_deadline =
			now + time::Duration::from_micros(self.soft_deadline_percent.mul_floor(left_micros));
		let mut skipped = 0;
		let mut unqueue_invalid = TxInvalidityReportMap::new();
		// Remembers a size/weight limit we hit while still skipping, so that the
		// final end reason reflects it even if we stop for another cause.
		let mut limit_hit_reason: Option<EndProposingReason> = None;

		// Wait at most 1/8 of the remaining time for the pool to become ready.
		let delay = deadline.saturating_duration_since((self.now)()) / 8;
		let mut pending_iterator =
			self.transaction_pool.ready_at_with_timeout(self.parent_hash, delay).await;

		let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit);

		debug!(target: LOG_TARGET, "Attempting to push transactions from the pool at {:?}.", self.parent_hash);
		let mut transaction_pushed = false;

		let end_reason = loop {
			let pending_tx = if let Some(pending_tx) = pending_iterator.next() {
				pending_tx
			} else {
				debug!(
					target: LOG_TARGET,
					"No more transactions, proceeding with proposing."
				);

				break limit_hit_reason.unwrap_or(EndProposingReason::NoMoreTransactions);
			};

			let now = (self.now)();
			if now > deadline {
				debug!(
					target: LOG_TARGET,
					"Consensus deadline reached when pushing block transactions, \
				proceeding with proposing."
				);
				break limit_hit_reason.unwrap_or(EndProposingReason::HitDeadline);
			}

			let pending_tx_data = (**pending_tx.data()).clone();
			let pending_tx_hash = pending_tx.hash().clone();

			// Size check is an estimate *before* pushing; oversized candidates are
			// skipped (up to MAX_SKIPPED_TRANSACTIONS, then soft deadline applies).
			let block_size = block_builder.estimate_block_size();
			if block_size + pending_tx_data.encoded_size() > block_size_limit {
				pending_iterator.report_invalid(&pending_tx);
				limit_hit_reason = Some(EndProposingReason::HitBlockSizeLimit);
				if skipped < MAX_SKIPPED_TRANSACTIONS {
					skipped += 1;
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
					 but will try {} more transactions before quitting.",
						MAX_SKIPPED_TRANSACTIONS - skipped,
					);
					continue;
				} else if now < soft_deadline {
					debug!(
						target: LOG_TARGET,
						"Transaction would overflow the block size limit, \
					 but we still have time before the soft deadline, so \
					 we will try a bit more."
					);
					continue;
				} else {
					debug!(
						target: LOG_TARGET,
						"Reached block size limit, proceeding with proposing."
					);
					break EndProposingReason::HitBlockSizeLimit;
				}
			}

			trace!(target: LOG_TARGET, "[{:?}] Pushing to the block.", pending_tx_hash);
			match soil_client::block_builder::BlockBuilder::push(block_builder, pending_tx_data) {
				Ok(()) => {
					transaction_pushed = true;
					// A successful push clears any previously remembered limit.
					limit_hit_reason = None;
					trace!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash);
				},
				Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => {
					pending_iterator.report_invalid(&pending_tx);
					limit_hit_reason = Some(EndProposingReason::HitBlockWeightLimit);
					if skipped < MAX_SKIPPED_TRANSACTIONS {
						skipped += 1;
						debug!(target: LOG_TARGET,
							"Block seems full, but will try {} more transactions before quitting.",
							MAX_SKIPPED_TRANSACTIONS - skipped,
						);
					} else if (self.now)() < soft_deadline {
						debug!(target: LOG_TARGET,
							"Block seems full, but we still have time before the soft deadline, \
							 so we will try a bit more before quitting."
						);
					} else {
						debug!(
							target: LOG_TARGET,
							"Reached block weight limit, proceeding with proposing."
						);
						break EndProposingReason::HitBlockWeightLimit;
					}
				},
				Err(e) => {
					pending_iterator.report_invalid(&pending_tx);
					debug!(
						target: LOG_TARGET,
						"[{:?}] Invalid transaction: {} at: {}", pending_tx_hash, e, self.parent_hash
					);

					// Only validity errors carry a reportable reason; other errors
					// are reported to the pool without one.
					let error_to_report = match e {
						ApplyExtrinsicFailed(Validity(e)) => Some(e),
						_ => None,
					};

					unqueue_invalid.insert(pending_tx_hash, error_to_report);
				},
			}
		};

		if matches!(end_reason, EndProposingReason::HitBlockSizeLimit) && !transaction_pushed {
			warn!(
				target: LOG_TARGET,
				"Hit block size limit of `{}` without including any transaction!", block_size_limit,
			);
		}

		// Tell the pool about everything we found invalid, so it can unqueue them.
		self.transaction_pool
			.report_invalid(Some(self.parent_hash), unqueue_invalid)
			.await;
		Ok(end_reason)
	}

	/// Prints a summary and does telemetry + metrics.
	///
	/// - `block`: The block that was built.
	/// - `end_reason`: Why did we stop producing the block?
	/// - `block_took`: How long did it take to produce the actual block?
	/// - `propose_took`: How long did the entire proposing take?
	fn print_summary(
		&self,
		block: &Block,
		end_reason: EndProposingReason,
		block_took: time::Duration,
		propose_took: time::Duration,
	) {
		let extrinsics = block.extrinsics();
		self.metrics.report(|metrics| {
			metrics.number_of_transactions.set(extrinsics.len() as u64);
			metrics.block_constructed.observe(block_took.as_secs_f64());
			metrics.report_end_proposing_reason(end_reason);
			metrics.create_block_proposal_time.observe(propose_took.as_secs_f64());
		});

		// NOTE(review): this summary is built (hashing every extrinsic) even when
		// neither log branch below ends up printing it — confirm if intended.
		let extrinsics_summary = if extrinsics.is_empty() {
			"no extrinsics".to_string()
		} else {
			format!(
				"extrinsics ({}): [{}]",
				extrinsics.len(),
				extrinsics
					.iter()
					.map(|xt| BlakeTwo256::hash_of(xt).to_string())
					.collect::<Vec<_>>()
					.join(", ")
			)
		};

		if log::log_enabled!(log::Level::Info) {
			info!(
				"🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; extrinsics_count: {}",
				block.header().number(),
				block_took.as_millis(),
				<Block as BlockT>::Hash::from(block.header().hash()),
				block.header().parent_hash(),
				end_reason,
				extrinsics.len()
			)
		} else if log::log_enabled!(log::Level::Trace) {
			// NOTE(review): enabling Trace also enables Info, so this branch
			// appears unreachable and `extrinsics_summary` is never logged —
			// confirm whether the branches should be swapped.
			trace!(
				"🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; {extrinsics_summary}",
				block.header().number(),
				block_took.as_millis(),
				<Block as BlockT>::Hash::from(block.header().hash()),
				block.header().parent_hash(),
				end_reason
			);
		}

		telemetry!(
			self.telemetry;
			CONSENSUS_INFO;
			"prepared_block_for_proposing";
			"number" => ?block.header().number(),
			"hash" => ?<Block as BlockT>::Hash::from(block.header().hash()),
		);
	}
}
569
570#[cfg(test)]
571mod tests {
572	use super::*;
573	use futures::executor::block_on;
574	use parking_lot::Mutex;
575	use soil_client::blockchain::HeaderBackend;
576	use soil_client::client_api::{Backend, TrieCacheContext};
577	use soil_client::consensus::{BlockOrigin, Environment};
578	use soil_client::transaction_pool::{ChainEvent, MaintainedTransactionPool, TransactionSource};
579	use soil_txpool::BasicPool;
580	use subsoil::api::Core;
581	use subsoil::runtime::{generic::BlockId, traits::NumberFor, Perbill};
582	use soil_test_node_runtime_client::{
583		prelude::*,
584		runtime::{Block as TestBlock, Extrinsic, ExtrinsicBuilder, Transfer},
585		TestClientBuilder, TestClientBuilderExt,
586	};
587
	// All test submissions use an external (network) transaction source.
	const SOURCE: TransactionSource = TransactionSource::External;

	// Note:
	// Maximum normal extrinsic size for `soil_test_node_runtime` is ~65% of max_block (refer to
	// `soil_test_node_runtime::RuntimeBlockWeights` for details).
	// This extrinsic sizing allows for:
	// - one huge xt + a lot of tiny dust
	// - one huge, no medium,
	// - two medium xts
	// This is widely exploited in following tests.
	// Values are `Perbill` parts passed to `new_fill_block`.
	const HUGE: u32 = 649000000;
	const MEDIUM: u32 = 250000000;
	const TINY: u32 = 1000;
601
	/// Build a TINY `fill_block` extrinsic with the given nonce.
	fn extrinsic(nonce: u64) -> Extrinsic {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
	}
605
	/// Wrap `header` into a `NewBestBlock` event for driving `txpool.maintain`.
	fn chain_event<B: BlockT>(header: B::Header) -> ChainEvent<B>
	where
		NumberFor<B>: From<u64>,
	{
		ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }
	}
612
	#[test]
	fn should_cease_building_block_when_deadline_is_reached() {
		// given
		let client = Arc::new(soil_test_node_runtime_client::new());
		let spawner = subsoil::core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let hashof0 = client.info().genesis_hash;
		block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client.expect_header(hashof0).expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mock clock: the first call returns the base instant unchanged, every
		// later call advances the clock by one second — so the 3s deadline is
		// crossed after only a few polls, cutting proposing short.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(hashof0).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1;
				}
				let old = value.1;
				let new = old + time::Duration::from_secs(1);
				*value = (true, new);
				old
			}),
		);

		// when
		let deadline = time::Duration::from_secs(3);
		let block = block_on(
			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
		)
		.map(|r| r.block)
		.unwrap();

		// then
		// block should have some extrinsics although we have some more in the pool.
		assert_eq!(block.extrinsics().len(), 1);
		assert_eq!(txpool.ready().count(), 2);
	}
667
	#[test]
	fn should_not_panic_when_deadline_is_reached() {
		// Proposing with a clock that jumps far past the deadline must still
		// produce a (possibly empty) block instead of panicking.
		let client = Arc::new(soil_test_node_runtime_client::new());
		let spawner = subsoil::core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		// Mock clock: after the first call, every call leaps 160s beyond the
		// stored instant — way past the 1s deadline.
		let cell = Mutex::new((false, time::Instant::now()));
		let proposer = proposer_factory.init_with_now(
			&client.expect_header(client.info().genesis_hash).unwrap(),
			Box::new(move || {
				let mut value = cell.lock();
				if !value.0 {
					value.0 = true;
					return value.1;
				}
				let new = value.1 + time::Duration::from_secs(160);
				*value = (true, new);
				new
			}),
		);

		let deadline = time::Duration::from_secs(1);
		block_on(
			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
		)
		.map(|r| r.block)
		.unwrap();
	}
705
	#[test]
	fn proposed_storage_changes_should_match_execute_block_storage_changes() {
		// The storage root reported by the proposer must match the one obtained
		// by independently re-executing the proposed block via the runtime API.
		let (client, backend) = TestClientBuilder::new().build_with_backend();
		let client = Arc::new(client);
		let spawner = subsoil::core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		let genesis_hash = client.info().best_hash;

		block_on(txpool.submit_at(genesis_hash, SOURCE, vec![extrinsic(0)])).unwrap();

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

		let proposer = proposer_factory.init_with_now(
			&client.header(genesis_hash).unwrap().unwrap(),
			Box::new(move || time::Instant::now()),
		);

		let deadline = time::Duration::from_secs(9);
		let proposal = block_on(
			proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
		)
		.unwrap();

		assert_eq!(proposal.block.extrinsics().len(), 1);

		// Re-execute the proposed block and extract the resulting storage changes.
		let api = client.runtime_api();
		api.execute_block(genesis_hash, proposal.block.into()).unwrap();

		let state = backend.state_at(genesis_hash, TrieCacheContext::Untrusted).unwrap();

		let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap();

		assert_eq!(
			proposal.storage_changes.transaction_storage_root,
			storage_changes.transaction_storage_root,
		);
	}
759
	// This test ensures that if one transaction of a user was rejected, because for example
	// the weight limit was hit, we don't mark the other transactions of the user as invalid because
	// the nonce is not matching.
	#[test]
	fn should_not_remove_invalid_transactions_from_the_same_sender_after_one_was_invalid() {
		// given
		let client = Arc::new(soil_test_node_runtime_client::new());
		let spawner = subsoil::core::testing::TaskExecutor::new();
		let txpool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			spawner.clone(),
			client.clone(),
		));

		// `fill_block` extrinsics sized so that only some combinations fit per
		// block (see the MEDIUM/HUGE sizing note at the top of this module).
		let medium = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(MEDIUM))
				.nonce(nonce)
				.build()
		};
		let huge = |nonce| {
			ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)).nonce(nonce).build()
		};

		block_on(txpool.submit_at(
			client.info().genesis_hash,
			SOURCE,
			vec![medium(0), medium(1), huge(2), medium(3), huge(4), medium(5), medium(6)],
		))
		.unwrap();

		let mut proposer_factory =
			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
		// Propose one block on top of `parent_number` and assert both the number
		// of included extrinsics and the number of transactions left in the pool.
		let mut propose_block = |client: &TestClient,
		                         parent_number,
		                         expected_block_extrinsics,
		                         expected_pool_transactions| {
			let hash = client.expect_block_hash_from_id(&BlockId::Number(parent_number)).unwrap();
			let proposer = proposer_factory.init_with_now(
				&client.expect_header(hash).unwrap(),
				Box::new(move || time::Instant::now()),
			);

			// when
			let deadline = time::Duration::from_secs(900);
			let block = block_on(
				proposer
					.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
			)
			.map(|r| r.block)
			.unwrap();

			// then
			// block should have some extrinsics although we have some more in the pool.
			assert_eq!(
				txpool.ready().count(),
				expected_pool_transactions,
				"at block: {}",
				block.header.number
			);
			assert_eq!(
				block.extrinsics().len(),
				expected_block_extrinsics,
				"at block: {}",
				block.header.number
			);

			block
		};

		// Import the block and let the pool prune the included transactions.
		let import_and_maintain = |client: Arc<TestClient>, block: TestBlock| {
			let hash = block.hash();
			block_on(client.import(BlockOrigin::Own, block)).unwrap();
			block_on(txpool.maintain(chain_event(
				client.expect_header(hash).expect("there should be header"),
			)));
		};

		block_on(
			txpool.maintain(chain_event(
				client
					.expect_header(client.info().genesis_hash)
					.expect("there should be header"),
			)),
		);
		assert_eq!(txpool.ready().count(), 7);

		// let's create one block and import it
		let block = propose_block(&client, 0, 2, 7);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 5);

		// now let's make sure that we can still make some progress
		let block = propose_block(&client, 1, 1, 5);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 4);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 2, 1, 4);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 3);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 3, 1, 3);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 2);

		// again let's make sure that we can still make some progress
		let block = propose_block(&client, 4, 2, 2);
		import_and_maintain(client.clone(), block.clone());
		assert_eq!(txpool.ready().count(), 0);
	}
873
#[test]
fn should_cease_building_block_when_block_limit_is_reached() {
	// Test client plus a full transaction pool to author from.
	let client = Arc::new(soil_test_node_runtime_client::new());
	let spawner = subsoil::core::testing::TaskExecutor::new();
	let txpool = Arc::from(BasicPool::new_full(
		Default::default(),
		true.into(),
		None,
		spawner.clone(),
		client.clone(),
	));
	let genesis_hash = client.info().genesis_hash;
	let genesis_header = client.expect_header(genesis_hash).expect("there should be header");

	let extrinsics_num = 5;
	// One explicit `Transfer` (nonce 0) followed by generated extrinsics for nonces 1..5.
	let mut extrinsics = vec![Transfer {
		from: Sr25519Keyring::Alice.into(),
		to: Sr25519Keyring::Bob.into(),
		amount: 100,
		nonce: 0,
	}
	.into_unchecked_extrinsic()];
	extrinsics.extend((1..extrinsics_num as u64).map(extrinsic));

	// A size budget covering the header, the encoding overhead of the (empty)
	// extrinsics vector and all extrinsics except the last one.
	let all_but_last: usize =
		extrinsics.iter().take(extrinsics_num - 1).map(Encode::encoded_size).sum();
	let block_limit =
		genesis_header.encoded_size() + all_but_last + Vec::<Extrinsic>::new().encoded_size();

	block_on(txpool.submit_at(genesis_hash, SOURCE, extrinsics.clone())).unwrap();
	block_on(txpool.maintain(chain_event(genesis_header.clone())));

	let mut proposer_factory =
		ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

	let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

	// Generous deadline so only the size limit can stop block production.
	let deadline = time::Duration::from_secs(300);
	let limited_block = block_on(proposer.propose_block(ProposeArgs {
		max_duration: deadline,
		block_size_limit: Some(block_limit),
		..Default::default()
	}))
	.unwrap()
	.block;

	// The budget excludes the last extrinsic, so exactly one must be left out.
	assert_eq!(limited_block.extrinsics().len(), extrinsics_num - 1);

	let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

	let unlimited_block = block_on(
		proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
	)
	.unwrap()
	.block;

	// With no size limit every submitted extrinsic fits.
	assert_eq!(unlimited_block.extrinsics().len(), extrinsics_num);

	let mut proposer_factory =
		ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

	let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap();

	// Exact block_limit, which includes:
	// 99 (header_size) + 718 (proof@initialize_block) + 246 (one Transfer extrinsic)
	let block_limit = {
		let builder = BlockBuilderBuilder::new(&*client)
			.on_parent_block(genesis_header.hash())
			.with_parent_block_number(0)
			.enable_proof_recording()
			.build()
			.unwrap();
		builder.estimate_block_size() + extrinsics[0].encoded_size()
	};
	let proof_block = block_on(proposer.propose_block(ProposeArgs {
		max_duration: deadline,
		block_size_limit: Some(block_limit),
		storage_proof_recorder: Some(Default::default()),
		..Default::default()
	}))
	.unwrap()
	.block;

	// The limit was raised, but with proof recording enabled the storage proof
	// counts towards the block size as well — the extra reads push everything
	// past the budget except the single `Transfer`.
	assert_eq!(proof_block.extrinsics().len(), 1);
}
972
#[test]
fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() {
	// Verifies that hitting resource-exhausting ("huge") transactions does not
	// stop block building before the soft deadline: the proposer keeps skipping
	// them and still picks up the small ("tiny") transactions that fit.
	// given
	let client = Arc::new(soil_test_node_runtime_client::new());
	let spawner = subsoil::core::testing::TaskExecutor::new();
	let txpool = Arc::from(BasicPool::new_full(
		Default::default(),
		true.into(),
		None,
		spawner.clone(),
		client.clone(),
	));
	let genesis_hash = client.info().genesis_hash;

	// `tiny` fills only a small fraction of the block; all are signed by the
	// default signer, distinguished by nonce.
	let tiny = |nonce| {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build()
	};
	// `huge` fills a large fraction of the block (exhausts resources); each one
	// uses a distinct numeric signer so they are independent of each other.
	let huge = |who| {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
			.signer(Sr25519Keyring::numeric(who))
			.build()
	};

	block_on(
		txpool.submit_at(
			genesis_hash,
			SOURCE,
			// add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources
			(0..MAX_SKIPPED_TRANSACTIONS * 2)
				.into_iter()
				.map(huge)
				// and some transactions that are okay.
				.chain((0..MAX_SKIPPED_TRANSACTIONS as u64).into_iter().map(tiny))
				.collect(),
		),
	)
	.unwrap();

	block_on(txpool.maintain(chain_event(
		client.expect_header(genesis_hash).expect("there should be header"),
	)));
	// All submitted transactions must be ready before proposing.
	assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3);

	let mut proposer_factory =
		ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

	// Mock clock: every call to "now" returns the previous value and advances
	// the stored instant by one second. Time therefore progresses only when the
	// proposer queries the clock, keeping the test deterministic.
	// NOTE(review): the final assertion depends on how many times the proposer
	// internals invoke this closure — do not restructure it.
	let cell = Mutex::new(time::Instant::now());
	let proposer = proposer_factory.init_with_now(
		&client.expect_header(genesis_hash).unwrap(),
		Box::new(move || {
			let mut value = cell.lock();
			let old = *value;
			*value = old + time::Duration::from_secs(1);
			old
		}),
	);

	// when
	// give it enough time so that deadline is never triggered.
	let deadline = time::Duration::from_secs(900);
	let block = block_on(
		proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
	)
	.map(|r| r.block)
	.unwrap();

	// then block should have all non-exhaust resources extrinsics (+ the first one).
	assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1);
}
1042
#[test]
fn should_only_skip_up_to_some_limit_after_soft_deadline() {
	// Verifies that once the soft deadline has passed, the proposer skips at
	// most a bounded number of resource-exhausting transactions before ceasing
	// block production, instead of scanning the whole pool.
	// given
	let client = Arc::new(soil_test_node_runtime_client::new());
	let spawner = subsoil::core::testing::TaskExecutor::new();
	let txpool = Arc::from(BasicPool::new_full(
		Default::default(),
		true.into(),
		None,
		spawner.clone(),
		client.clone(),
	));
	let genesis_hash = client.info().genesis_hash;

	// `tiny` uses nonce 1 with the same numeric signer as the corresponding
	// `huge` (nonce 0), so a tiny transaction only becomes valid after its
	// signer's huge one was included.
	let tiny = |who| {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY))
			.signer(Sr25519Keyring::numeric(who))
			.nonce(1)
			.build()
	};
	// `huge` fills a large fraction of the block (exhausts resources).
	let huge = |who| {
		ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE))
			.signer(Sr25519Keyring::numeric(who))
			.build()
	};

	block_on(
		txpool.submit_at(
			genesis_hash,
			SOURCE,
			(0..MAX_SKIPPED_TRANSACTIONS + 2)
				.into_iter()
				.map(huge)
				// and some transactions that are okay.
				.chain((0..MAX_SKIPPED_TRANSACTIONS + 2).into_iter().map(tiny))
				.collect(),
		),
	)
	.unwrap();

	block_on(txpool.maintain(chain_event(
		client.expect_header(genesis_hash).expect("there should be header"),
	)));
	// All submitted transactions must be ready before proposing.
	assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 4);

	let mut proposer_factory =
		ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

	let deadline = time::Duration::from_secs(600);
	// Mock clock shared with the assertion below via `cell2`: it tracks
	// (number of calls, current instant). The second call (`called == 1`) jumps
	// time by half the deadline, which is past the soft deadline but before the
	// hard deadline; all later calls do not advance time at all.
	// NOTE(review): behavior depends on the exact call count of this closure —
	// do not restructure it.
	let cell = Arc::new(Mutex::new((0, time::Instant::now())));
	let cell2 = cell.clone();
	let proposer = proposer_factory.init_with_now(
		&client.expect_header(genesis_hash).unwrap(),
		Box::new(move || {
			let mut value = cell.lock();
			let (called, old) = *value;
			// add time after deadline is calculated internally (hence 1)
			let increase = if called == 1 {
				// we start after the soft_deadline should have already been reached.
				deadline / 2
			} else {
				// but we make sure to never reach the actual deadline
				time::Duration::from_millis(0)
			};
			*value = (called + 1, old + increase);
			old
		}),
	);

	let block = block_on(
		proposer.propose_block(ProposeArgs { max_duration: deadline, ..Default::default() }),
	)
	.map(|r| r.block)
	.unwrap();

	// then the block should have one or two transactions. This maybe random as they are
	// processed in parallel. The same signer and consecutive nonces for huge and tiny
	// transactions guarantees that max two transactions will get to the block.
	assert!(
		(1..3).contains(&block.extrinsics().len()),
		"Block shall contain one or two extrinsics."
	);
	assert!(
		cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS,
		"Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline"
	);
}
1130}