sc_service/client/
client.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! Substrate Client
20
21use super::{
22	block_rules::{BlockRules, LookupResult as BlockLookupResult},
23	CodeProvider,
24};
25use crate::client::notification_pinning::NotificationPinningWorker;
26use log::{debug, info, trace, warn};
27use parking_lot::{Mutex, RwLock};
28use prometheus_endpoint::Registry;
29use rand::Rng;
30use sc_chain_spec::{resolve_state_version_from_wasm, BuildGenesisBlock};
31use sc_client_api::{
32	backend::{
33		self, apply_aux, BlockImportOperation, ClientImportOperation, FinalizeSummary, Finalizer,
34		ImportNotificationAction, ImportSummary, LockImportRun, NewBlockState, StorageProvider,
35	},
36	client::{
37		BadBlocks, BlockBackend, BlockImportNotification, BlockOf, BlockchainEvents, ClientInfo,
38		FinalityNotification, FinalityNotifications, ForkBlocks, ImportNotifications,
39		PreCommitActions, ProvideUncles,
40	},
41	execution_extensions::ExecutionExtensions,
42	notifications::{StorageEventStream, StorageNotifications},
43	CallExecutor, ExecutorProvider, KeysIter, OnFinalityAction, OnImportAction, PairsIter,
44	ProofProvider, StaleBlock, TrieCacheContext, UnpinWorkerMessage, UsageProvider,
45};
46use sc_consensus::{
47	BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction,
48};
49use sc_executor::RuntimeVersion;
50use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO};
51use sp_api::{
52	ApiExt, ApiRef, CallApiAt, CallApiAtParams, ConstructRuntimeApi, Core as CoreApi,
53	ProvideRuntimeApi,
54};
55use sp_blockchain::{
56	self as blockchain, Backend as ChainBackend, CachedHeaderMetadata, Error,
57	HeaderBackend as ChainHeaderBackend, HeaderMetadata, Info as BlockchainInfo,
58};
59use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError};
60
61use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender};
62use sp_core::{
63	storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, StorageKey},
64	traits::{CallContext, SpawnNamed},
65};
66use sp_runtime::{
67	generic::{BlockId, SignedBlock},
68	traits::{
69		Block as BlockT, BlockIdTo, HashingFor, Header as HeaderT, NumberFor, One,
70		SaturatedConversion, Zero,
71	},
72	Justification, Justifications, StateVersion,
73};
74use sp_state_machine::{
75	prove_child_read, prove_range_read_with_child_with_size, prove_read,
76	read_range_proof_check_with_child_on_proving_backend, Backend as StateBackend,
77	ChildStorageCollection, KeyValueStates, KeyValueStorageLevel, StorageCollection,
78	MAX_NESTED_TRIE_DEPTH,
79};
80use sp_trie::{proof_size_extension::ProofSizeExt, CompactProof, MerkleValue, StorageProof};
81use std::{
82	collections::{HashMap, HashSet},
83	marker::PhantomData,
84	path::PathBuf,
85	sync::Arc,
86};
87
88use super::call_executor::LocalCallExecutor;
89use sp_core::traits::CodeExecutor;
90
91type NotificationSinks<T> = Mutex<Vec<TracingUnboundedSender<T>>>;
92
93/// Substrate Client
94pub struct Client<B, E, Block, RA>
95where
96	Block: BlockT,
97{
98	backend: Arc<B>,
99	executor: E,
100	storage_notifications: StorageNotifications<Block>,
101	import_notification_sinks: NotificationSinks<BlockImportNotification<Block>>,
102	every_import_notification_sinks: NotificationSinks<BlockImportNotification<Block>>,
103	finality_notification_sinks: NotificationSinks<FinalityNotification<Block>>,
104	// Collects auxiliary operations to be performed atomically together with
105	// block import operations.
106	import_actions: Mutex<Vec<OnImportAction<Block>>>,
107	// Collects auxiliary operations to be performed atomically together with
108	// block finalization operations.
109	finality_actions: Mutex<Vec<OnFinalityAction<Block>>>,
110	// Holds the block hash currently being imported. TODO: replace this with block queue.
111	importing_block: RwLock<Option<Block::Hash>>,
112	block_rules: BlockRules<Block>,
113	config: ClientConfig<Block>,
114	telemetry: Option<TelemetryHandle>,
115	unpin_worker_sender: TracingUnboundedSender<UnpinWorkerMessage<Block>>,
116	code_provider: CodeProvider<Block, B, E>,
117	_phantom: PhantomData<RA>,
118}
119
120/// Used in importing a block, where additional changes are made after the runtime
121/// executed.
122enum PrePostHeader<H> {
123	/// they are the same: no post-runtime digest items.
124	Same(H),
125	/// different headers (pre, post).
126	Different(H, H),
127}
128
129impl<H> PrePostHeader<H> {
130	/// get a reference to the "post-header" -- the header as it should be
131	/// after all changes are applied.
132	fn post(&self) -> &H {
133		match *self {
134			PrePostHeader::Same(ref h) => h,
135			PrePostHeader::Different(_, ref h) => h,
136		}
137	}
138
139	/// convert to the "post-header" -- the header as it should be after
140	/// all changes are applied.
141	fn into_post(self) -> H {
142		match self {
143			PrePostHeader::Same(h) => h,
144			PrePostHeader::Different(_, h) => h,
145		}
146	}
147}
148
149enum PrepareStorageChangesResult<Block: BlockT> {
150	Discard(ImportResult),
151	Import(Option<sc_consensus::StorageChanges<Block>>),
152}
153/// Client configuration items.
154#[derive(Debug, Clone)]
155pub struct ClientConfig<Block: BlockT> {
156	/// Enable the offchain worker db.
157	pub offchain_worker_enabled: bool,
158	/// If true, allows access from the runtime to write into offchain worker db.
159	pub offchain_indexing_api: bool,
160	/// Path where WASM files exist to override the on-chain WASM.
161	pub wasm_runtime_overrides: Option<PathBuf>,
162	/// Skip writing genesis state on first start.
163	pub no_genesis: bool,
164	/// Map of WASM runtime substitute starting at the child of the given block until the runtime
165	/// version doesn't match anymore.
166	pub wasm_runtime_substitutes: HashMap<NumberFor<Block>, Vec<u8>>,
167	/// Enable recording of storage proofs during block import
168	pub enable_import_proof_recording: bool,
169}
170
171impl<Block: BlockT> Default for ClientConfig<Block> {
172	fn default() -> Self {
173		Self {
174			offchain_worker_enabled: false,
175			offchain_indexing_api: false,
176			wasm_runtime_overrides: None,
177			no_genesis: false,
178			wasm_runtime_substitutes: HashMap::new(),
179			enable_import_proof_recording: false,
180		}
181	}
182}
183
184/// Create a client with the explicitly provided backend.
185/// This is useful for testing backend implementations.
186pub fn new_with_backend<B, E, Block, G, RA>(
187	backend: Arc<B>,
188	executor: E,
189	genesis_block_builder: G,
190	spawn_handle: Box<dyn SpawnNamed>,
191	prometheus_registry: Option<Registry>,
192	telemetry: Option<TelemetryHandle>,
193	config: ClientConfig<Block>,
194) -> sp_blockchain::Result<Client<B, LocalCallExecutor<Block, B, E>, Block, RA>>
195where
196	E: CodeExecutor + sc_executor::RuntimeVersionOf,
197	G: BuildGenesisBlock<
198		Block,
199		BlockImportOperation = <B as backend::Backend<Block>>::BlockImportOperation,
200	>,
201	Block: BlockT,
202	B: backend::LocalBackend<Block> + 'static,
203{
204	let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone()));
205
206	let call_executor =
207		LocalCallExecutor::new(backend.clone(), executor, config.clone(), extensions)?;
208
209	Client::new(
210		backend,
211		call_executor,
212		spawn_handle,
213		genesis_block_builder,
214		Default::default(),
215		Default::default(),
216		prometheus_registry,
217		telemetry,
218		config,
219	)
220}
221
222impl<B, E, Block, RA> BlockOf for Client<B, E, Block, RA>
223where
224	B: backend::Backend<Block>,
225	E: CallExecutor<Block>,
226	Block: BlockT,
227{
228	type Type = Block;
229}
230
231impl<B, E, Block, RA> LockImportRun<Block, B> for Client<B, E, Block, RA>
232where
233	B: backend::Backend<Block>,
234	E: CallExecutor<Block>,
235	Block: BlockT,
236{
237	fn lock_import_and_run<R, Err, F>(&self, f: F) -> Result<R, Err>
238	where
239		F: FnOnce(&mut ClientImportOperation<Block, B>) -> Result<R, Err>,
240		Err: From<sp_blockchain::Error>,
241	{
242		let inner = || {
243			let _import_lock = self.backend.get_import_lock().write();
244
245			let mut op = ClientImportOperation {
246				op: self.backend.begin_operation()?,
247				notify_imported: None,
248				notify_finalized: None,
249			};
250
251			let r = f(&mut op)?;
252
253			let ClientImportOperation { mut op, notify_imported, notify_finalized } = op;
254
255			let finality_notification = notify_finalized.map(|summary| {
256				FinalityNotification::from_summary(summary, self.unpin_worker_sender.clone())
257			});
258
259			let (import_notification, storage_changes, import_notification_action) =
260				match notify_imported {
261					Some(mut summary) => {
262						let import_notification_action = summary.import_notification_action;
263						let storage_changes = summary.storage_changes.take();
264						(
265							Some(BlockImportNotification::from_summary(
266								summary,
267								self.unpin_worker_sender.clone(),
268							)),
269							storage_changes,
270							import_notification_action,
271						)
272					},
273					None => (None, None, ImportNotificationAction::None),
274				};
275
276			if let Some(ref notification) = finality_notification {
277				for action in self.finality_actions.lock().iter_mut() {
278					op.insert_aux(action(notification))?;
279				}
280			}
281			if let Some(ref notification) = import_notification {
282				for action in self.import_actions.lock().iter_mut() {
283					op.insert_aux(action(notification))?;
284				}
285			}
286
287			self.backend.commit_operation(op)?;
288
289			// We need to pin the block in the backend once
290			// for each notification. Once all notifications are
291			// dropped, the block will be unpinned automatically.
292			if let Some(ref notification) = finality_notification {
293				if let Err(err) = self.backend.pin_block(notification.hash) {
294					debug!(
295						"Unable to pin block for finality notification. hash: {}, Error: {}",
296						notification.hash, err
297					);
298				} else {
299					let _ = self
300						.unpin_worker_sender
301						.unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash))
302						.map_err(|e| {
303							log::error!(
304								"Unable to send AnnouncePin worker message for finality: {e}"
305							)
306						});
307				}
308			}
309
310			if let Some(ref notification) = import_notification {
311				if let Err(err) = self.backend.pin_block(notification.hash) {
312					debug!(
313						"Unable to pin block for import notification. hash: {}, Error: {}",
314						notification.hash, err
315					);
316				} else {
317					let _ = self
318						.unpin_worker_sender
319						.unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash))
320						.map_err(|e| {
321							log::error!("Unable to send AnnouncePin worker message for import: {e}")
322						});
323				};
324			}
325
326			self.notify_finalized(finality_notification)?;
327			self.notify_imported(import_notification, import_notification_action, storage_changes)?;
328
329			Ok(r)
330		};
331
332		let result = inner();
333		*self.importing_block.write() = None;
334
335		result
336	}
337}
338
339impl<B, E, Block, RA> LockImportRun<Block, B> for &Client<B, E, Block, RA>
340where
341	Block: BlockT,
342	B: backend::Backend<Block>,
343	E: CallExecutor<Block>,
344{
345	fn lock_import_and_run<R, Err, F>(&self, f: F) -> Result<R, Err>
346	where
347		F: FnOnce(&mut ClientImportOperation<Block, B>) -> Result<R, Err>,
348		Err: From<sp_blockchain::Error>,
349	{
350		(**self).lock_import_and_run(f)
351	}
352}
353
354impl<B, E, Block, RA> Client<B, E, Block, RA>
355where
356	B: backend::Backend<Block>,
357	E: CallExecutor<Block>,
358	Block: BlockT,
359	Block::Header: Clone,
360{
361	/// Creates new Substrate Client with given blockchain and code executor.
362	pub fn new<G>(
363		backend: Arc<B>,
364		executor: E,
365		spawn_handle: Box<dyn SpawnNamed>,
366		genesis_block_builder: G,
367		fork_blocks: ForkBlocks<Block>,
368		bad_blocks: BadBlocks<Block>,
369		prometheus_registry: Option<Registry>,
370		telemetry: Option<TelemetryHandle>,
371		config: ClientConfig<Block>,
372	) -> sp_blockchain::Result<Self>
373	where
374		G: BuildGenesisBlock<
375			Block,
376			BlockImportOperation = <B as backend::Backend<Block>>::BlockImportOperation,
377		>,
378		E: Clone,
379		B: 'static,
380	{
381		let info = backend.blockchain().info();
382		if info.finalized_state.is_none() {
383			let (genesis_block, mut op) = genesis_block_builder.build_genesis_block()?;
384			info!(
385				"🔨 Initializing Genesis block/state (state: {}, header-hash: {})",
386				genesis_block.header().state_root(),
387				genesis_block.header().hash()
388			);
389			// Genesis may be written after some blocks have been imported and finalized.
390			// So we only finalize it when the database is empty.
391			let block_state = if info.best_hash == Default::default() {
392				NewBlockState::Final
393			} else {
394				NewBlockState::Normal
395			};
396			let (header, body) = genesis_block.deconstruct();
397			op.set_block_data(header, Some(body), None, None, block_state)?;
398			backend.commit_operation(op)?;
399		}
400
401		let (unpin_worker_sender, rx) = tracing_unbounded::<UnpinWorkerMessage<Block>>(
402			"notification-pinning-worker-channel",
403			10_000,
404		);
405		let unpin_worker = NotificationPinningWorker::new(rx, backend.clone());
406		spawn_handle.spawn("notification-pinning-worker", None, Box::pin(unpin_worker.run()));
407		let code_provider = CodeProvider::new(&config, executor.clone(), backend.clone())?;
408
409		Ok(Client {
410			backend,
411			executor,
412			storage_notifications: StorageNotifications::new(prometheus_registry),
413			import_notification_sinks: Default::default(),
414			every_import_notification_sinks: Default::default(),
415			finality_notification_sinks: Default::default(),
416			import_actions: Default::default(),
417			finality_actions: Default::default(),
418			importing_block: Default::default(),
419			block_rules: BlockRules::new(fork_blocks, bad_blocks),
420			config,
421			telemetry,
422			unpin_worker_sender,
423			code_provider,
424			_phantom: Default::default(),
425		})
426	}
427
428	/// returns a reference to the block import notification sinks
429	/// useful for test environments.
430	pub fn import_notification_sinks(&self) -> &NotificationSinks<BlockImportNotification<Block>> {
431		&self.import_notification_sinks
432	}
433
434	/// returns a reference to the finality notification sinks
435	/// useful for test environments.
436	pub fn finality_notification_sinks(&self) -> &NotificationSinks<FinalityNotification<Block>> {
437		&self.finality_notification_sinks
438	}
439
440	/// Get a reference to the state at a given block.
441	pub fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result<B::State> {
442		self.backend.state_at(hash, TrieCacheContext::Untrusted)
443	}
444
445	/// Get the code at a given block.
446	///
447	/// This takes any potential substitutes into account, but ignores overrides.
448	pub fn code_at(&self, hash: Block::Hash) -> sp_blockchain::Result<Vec<u8>> {
449		self.code_provider.code_at_ignoring_overrides(hash)
450	}
451
452	/// Get the RuntimeVersion at a given block.
453	pub fn runtime_version_at(&self, hash: Block::Hash) -> sp_blockchain::Result<RuntimeVersion> {
454		CallExecutor::runtime_version(&self.executor, hash)
455	}
456
457	/// Apply a checked and validated block to an operation.
458	fn apply_block(
459		&self,
460		operation: &mut ClientImportOperation<Block, B>,
461		import_block: BlockImportParams<Block>,
462		storage_changes: Option<sc_consensus::StorageChanges<Block>>,
463	) -> sp_blockchain::Result<ImportResult>
464	where
465		Self: ProvideRuntimeApi<Block>,
466		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
467	{
468		let BlockImportParams {
469			origin,
470			header,
471			justifications,
472			post_digests,
473			body,
474			indexed_body,
475			finalized,
476			auxiliary,
477			fork_choice,
478			intermediates,
479			import_existing,
480			create_gap,
481			..
482		} = import_block;
483
484		if !intermediates.is_empty() {
485			return Err(Error::IncompletePipeline)
486		}
487
488		let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?;
489
490		let import_headers = if post_digests.is_empty() {
491			PrePostHeader::Same(header)
492		} else {
493			let mut post_header = header.clone();
494			for item in post_digests {
495				post_header.digest_mut().push(item);
496			}
497			PrePostHeader::Different(header, post_header)
498		};
499
500		let hash = import_headers.post().hash();
501		let height = (*import_headers.post().number()).saturated_into::<u64>();
502
503		*self.importing_block.write() = Some(hash);
504
505		operation.op.set_create_gap(create_gap);
506
507		let result = self.execute_and_import_block(
508			operation,
509			origin,
510			hash,
511			import_headers,
512			justifications,
513			body,
514			indexed_body,
515			storage_changes,
516			finalized,
517			auxiliary,
518			fork_choice,
519			import_existing,
520		);
521
522		if let Ok(ImportResult::Imported(ref aux)) = result {
523			if aux.is_new_best {
524				// don't send telemetry block import events during initial sync for every
525				// block to avoid spamming the telemetry server, these events will be randomly
526				// sent at a rate of 1/10.
527				if origin != BlockOrigin::NetworkInitialSync || rand::thread_rng().gen_bool(0.1) {
528					telemetry!(
529						self.telemetry;
530						SUBSTRATE_INFO;
531						"block.import";
532						"height" => height,
533						"best" => ?hash,
534						"origin" => ?origin
535					);
536				}
537			}
538		}
539
540		result
541	}
542
543	fn execute_and_import_block(
544		&self,
545		operation: &mut ClientImportOperation<Block, B>,
546		origin: BlockOrigin,
547		hash: Block::Hash,
548		import_headers: PrePostHeader<Block::Header>,
549		justifications: Option<Justifications>,
550		body: Option<Vec<Block::Extrinsic>>,
551		indexed_body: Option<Vec<Vec<u8>>>,
552		storage_changes: Option<sc_consensus::StorageChanges<Block>>,
553		finalized: bool,
554		aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
555		fork_choice: ForkChoiceStrategy,
556		import_existing: bool,
557	) -> sp_blockchain::Result<ImportResult>
558	where
559		Self: ProvideRuntimeApi<Block>,
560		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
561	{
562		let parent_hash = *import_headers.post().parent_hash();
563		let status = self.backend.blockchain().status(hash)?;
564		let parent_exists =
565			self.backend.blockchain().status(parent_hash)? == blockchain::BlockStatus::InChain;
566		match (import_existing, status) {
567			(false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain),
568			(false, blockchain::BlockStatus::Unknown) => {},
569			(true, blockchain::BlockStatus::InChain) => {},
570			(true, blockchain::BlockStatus::Unknown) => {},
571		}
572
573		let info = self.backend.blockchain().info();
574		let gap_block =
575			info.block_gap.map_or(false, |gap| *import_headers.post().number() == gap.start);
576
577		// the block is lower than our last finalized block so it must revert
578		// finality, refusing import.
579		if status == blockchain::BlockStatus::Unknown &&
580			*import_headers.post().number() <= info.finalized_number &&
581			!gap_block
582		{
583			return Err(sp_blockchain::Error::NotInFinalizedChain)
584		}
585
586		// this is a fairly arbitrary choice of where to draw the line on making notifications,
587		// but the general goal is to only make notifications when we are already fully synced
588		// and get a new chain head.
589		let make_notifications = match origin {
590			BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast =>
591				true,
592			BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false,
593		};
594
595		let storage_changes = match storage_changes {
596			Some(storage_changes) => {
597				let storage_changes = match storage_changes {
598					sc_consensus::StorageChanges::Changes(storage_changes) => {
599						self.backend.begin_state_operation(&mut operation.op, parent_hash)?;
600						let (main_sc, child_sc, offchain_sc, tx, _, tx_index) =
601							storage_changes.into_inner();
602
603						if self.config.offchain_indexing_api {
604							operation.op.update_offchain_storage(offchain_sc)?;
605						}
606
607						operation.op.update_db_storage(tx)?;
608						operation.op.update_storage(main_sc.clone(), child_sc.clone())?;
609						operation.op.update_transaction_index(tx_index)?;
610
611						Some((main_sc, child_sc))
612					},
613					sc_consensus::StorageChanges::Import(changes) => {
614						let mut storage = sp_storage::Storage::default();
615						for state in changes.state.0.into_iter() {
616							if state.parent_storage_keys.is_empty() && state.state_root.is_empty() {
617								for (key, value) in state.key_values.into_iter() {
618									storage.top.insert(key, value);
619								}
620							} else {
621								for parent_storage in state.parent_storage_keys {
622									let storage_key = PrefixedStorageKey::new_ref(&parent_storage);
623									let storage_key =
624										match ChildType::from_prefixed_key(storage_key) {
625											Some((ChildType::ParentKeyId, storage_key)) =>
626												storage_key,
627											None =>
628												return Err(Error::Backend(
629													"Invalid child storage key.".to_string(),
630												)),
631										};
632									let entry = storage
633										.children_default
634										.entry(storage_key.to_vec())
635										.or_insert_with(|| StorageChild {
636											data: Default::default(),
637											child_info: ChildInfo::new_default(storage_key),
638										});
639									for (key, value) in state.key_values.iter() {
640										entry.data.insert(key.clone(), value.clone());
641									}
642								}
643							}
644						}
645
646						// This is use by fast sync for runtime version to be resolvable from
647						// changes.
648						let state_version = resolve_state_version_from_wasm::<_, HashingFor<Block>>(
649							&storage,
650							&self.executor,
651						)?;
652						let state_root = operation.op.reset_storage(storage, state_version)?;
653						if state_root != *import_headers.post().state_root() {
654							// State root mismatch when importing state. This should not happen in
655							// safe fast sync mode, but may happen in unsafe mode.
656							warn!("Error importing state: State root mismatch.");
657							return Err(Error::InvalidStateRoot)
658						}
659						None
660					},
661				};
662
663				storage_changes
664			},
665			None => None,
666		};
667
668		// Ensure parent chain is finalized to maintain invariant that finality is called
669		// sequentially.
670		if finalized && parent_exists && info.finalized_hash != parent_hash {
671			self.apply_finality_with_block_hash(
672				operation,
673				parent_hash,
674				None,
675				&info,
676				make_notifications,
677			)?;
678		}
679
680		let is_new_best = !gap_block &&
681			(finalized ||
682				match fork_choice {
683					ForkChoiceStrategy::LongestChain =>
684						import_headers.post().number() > &info.best_number,
685					ForkChoiceStrategy::Custom(v) => v,
686				});
687
688		let leaf_state = if finalized {
689			NewBlockState::Final
690		} else if is_new_best {
691			NewBlockState::Best
692		} else {
693			NewBlockState::Normal
694		};
695
696		let tree_route = if is_new_best && info.best_hash != parent_hash && parent_exists {
697			let route_from_best =
698				sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, parent_hash)?;
699			Some(route_from_best)
700		} else {
701			None
702		};
703
704		trace!(
705			"Imported {}, (#{}), best={}, origin={:?}",
706			hash,
707			import_headers.post().number(),
708			is_new_best,
709			origin,
710		);
711
712		operation.op.set_block_data(
713			import_headers.post().clone(),
714			body,
715			indexed_body,
716			justifications,
717			leaf_state,
718		)?;
719
720		operation.op.insert_aux(aux)?;
721
722		let should_notify_every_block = !self.every_import_notification_sinks.lock().is_empty();
723
724		// Notify when we are already synced to the tip of the chain
725		// or if this import triggers a re-org
726		let should_notify_recent_block = make_notifications || tree_route.is_some();
727
728		if should_notify_every_block || should_notify_recent_block {
729			let header = import_headers.into_post();
730			if finalized && should_notify_recent_block {
731				let mut summary = match operation.notify_finalized.take() {
732					Some(mut summary) => {
733						summary.header = header.clone();
734						summary.finalized.push(hash);
735						summary
736					},
737					None => FinalizeSummary {
738						header: header.clone(),
739						finalized: vec![hash],
740						stale_blocks: Vec::new(),
741					},
742				};
743
744				if parent_exists {
745					// The stale blocks that will be displaced after the block is finalized.
746					let stale_heads = self.backend.blockchain().displaced_leaves_after_finalizing(
747						hash,
748						*header.number(),
749						parent_hash,
750					)?;
751
752					summary.stale_blocks.extend(stale_heads.displaced_blocks.into_iter().map(
753						|b| StaleBlock {
754							hash: b,
755							is_head: stale_heads.displaced_leaves.iter().any(|(_, h)| *h == b),
756						},
757					));
758				}
759				operation.notify_finalized = Some(summary);
760			}
761
762			let import_notification_action = if should_notify_every_block {
763				if should_notify_recent_block {
764					ImportNotificationAction::Both
765				} else {
766					ImportNotificationAction::EveryBlock
767				}
768			} else {
769				ImportNotificationAction::RecentBlock
770			};
771
772			operation.notify_imported = Some(ImportSummary {
773				hash,
774				origin,
775				header,
776				is_new_best,
777				storage_changes,
778				tree_route,
779				import_notification_action,
780			})
781		}
782
783		Ok(ImportResult::imported(is_new_best))
784	}
785
786	/// Prepares the storage changes for a block.
787	///
788	/// It checks if the state should be enacted and if the `import_block` maybe already provides
789	/// the required storage changes. If the state should be enacted and the storage changes are not
790	/// provided, the block is re-executed to get the storage changes.
791	fn prepare_block_storage_changes(
792		&self,
793		import_block: &mut BlockImportParams<Block>,
794	) -> sp_blockchain::Result<PrepareStorageChangesResult<Block>>
795	where
796		Self: ProvideRuntimeApi<Block>,
797		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
798	{
799		let parent_hash = import_block.header.parent_hash();
800		let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip);
801		let (enact_state, storage_changes) = match (self.block_status(*parent_hash)?, state_action)
802		{
803			(BlockStatus::KnownBad, _) =>
804				return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)),
805			(
806				BlockStatus::InChainPruned,
807				StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)),
808			) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)),
809			(_, StateAction::ApplyChanges(changes)) => (true, Some(changes)),
810			(BlockStatus::Unknown, _) =>
811				return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)),
812			(_, StateAction::Skip) => (false, None),
813			(BlockStatus::InChainPruned, StateAction::Execute) =>
814				return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)),
815			(BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None),
816			(_, StateAction::Execute) => (true, None),
817			(_, StateAction::ExecuteIfPossible) => (true, None),
818		};
819
820		let storage_changes = match (enact_state, storage_changes, &import_block.body) {
821			// We have storage changes and should enact the state, so we don't need to do anything
822			// here
823			(true, changes @ Some(_), _) => changes,
824			// We should enact state, but don't have any storage changes, so we need to execute the
825			// block.
826			(true, None, Some(ref body)) => {
827				let mut runtime_api = self.runtime_api();
828				let call_context = CallContext::Onchain;
829				runtime_api.set_call_context(call_context);
830
831				if self.config.enable_import_proof_recording {
832					runtime_api.record_proof();
833					let recorder = runtime_api
834						.proof_recorder()
835						.expect("Proof recording is enabled in the line above; qed.");
836					runtime_api.register_extension(ProofSizeExt::new(recorder));
837				}
838
839				runtime_api.execute_block(
840					*parent_hash,
841					Block::new(import_block.header.clone(), body.clone()).into(),
842				)?;
843
844				let state = self.backend.state_at(*parent_hash, call_context.into())?;
845				let gen_storage_changes = runtime_api
846					.into_storage_changes(&state, *parent_hash)
847					.map_err(sp_blockchain::Error::Storage)?;
848
849				if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root
850				{
851					return Err(Error::InvalidStateRoot)
852				}
853				Some(sc_consensus::StorageChanges::Changes(gen_storage_changes))
854			},
855			// No block body, no storage changes
856			(true, None, None) => None,
857			// We should not enact the state, so we set the storage changes to `None`.
858			(false, _, _) => None,
859		};
860
861		Ok(PrepareStorageChangesResult::Import(storage_changes))
862	}
863
864	fn apply_finality_with_block_hash(
865		&self,
866		operation: &mut ClientImportOperation<Block, B>,
867		hash: Block::Hash,
868		justification: Option<Justification>,
869		info: &BlockchainInfo<Block>,
870		notify: bool,
871	) -> sp_blockchain::Result<()> {
872		if hash == info.finalized_hash {
873			warn!(
874				"Possible safety violation: attempted to re-finalize last finalized block {:?} ",
875				hash,
876			);
877			return Ok(())
878		}
879
880		// Find tree route from last finalized to given block.
881		let route_from_finalized =
882			sp_blockchain::tree_route(self.backend.blockchain(), info.finalized_hash, hash)?;
883
884		if let Some(retracted) = route_from_finalized.retracted().get(0) {
885			warn!(
886				"Safety violation: attempted to revert finalized block {:?} which is not in the \
887				same chain as last finalized {:?}",
888				retracted, info.finalized_hash
889			);
890
891			return Err(sp_blockchain::Error::NotInFinalizedChain)
892		}
893
894		// We may need to coercively update the best block if there is more than one
895		// leaf or if the finalized block number is greater than last best number recorded
896		// by the backend. This last condition may apply in case of consensus implementations
897		// not always checking this condition.
898		let block_number = self
899			.backend
900			.blockchain()
901			.number(hash)?
902			.ok_or(Error::MissingHeader(format!("{hash:?}")))?;
903		if self.backend.blockchain().leaves()?.len() > 1 || info.best_number < block_number {
904			let route_from_best =
905				sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, hash)?;
906
907			// If the block is not a direct ancestor of the current best chain,
908			// then some other block is the common ancestor.
909			if route_from_best.common_block().hash != hash {
910				// NOTE: we're setting the finalized block as best block, this might
911				// be slightly inaccurate since we might have a "better" block
912				// further along this chain, but since best chain selection logic is
913				// plugable we cannot make a better choice here. usages that need
914				// an accurate "best" block need to go through `SelectChain`
915				// instead.
916				operation.op.mark_head(hash)?;
917			}
918		}
919
920		let enacted = route_from_finalized.enacted();
921		assert!(enacted.len() > 0);
922		for finalize_new in &enacted[..enacted.len() - 1] {
923			operation.op.mark_finalized(finalize_new.hash, None)?;
924		}
925
926		assert_eq!(enacted.last().map(|e| e.hash), Some(hash));
927		operation.op.mark_finalized(hash, justification)?;
928
929		if notify {
930			let finalized =
931				route_from_finalized.enacted().iter().map(|elem| elem.hash).collect::<Vec<_>>();
932
933			let header = self
934				.backend
935				.blockchain()
936				.header(hash)?
937				.expect("Block to finalize expected to be onchain; qed");
938			let block_number = *header.number();
939
940			// The stale blocks that will be displaced after the block is finalized.
941			let mut stale_blocks = Vec::new();
942
943			let stale_heads = self.backend.blockchain().displaced_leaves_after_finalizing(
944				hash,
945				block_number,
946				*header.parent_hash(),
947			)?;
948
949			stale_blocks.extend(stale_heads.displaced_blocks.into_iter().map(|b| StaleBlock {
950				hash: b,
951				is_head: stale_heads.displaced_leaves.iter().any(|(_, h)| *h == b),
952			}));
953
954			operation.notify_finalized = Some(FinalizeSummary { header, finalized, stale_blocks });
955		}
956
957		Ok(())
958	}
959
960	fn notify_finalized(
961		&self,
962		notification: Option<FinalityNotification<Block>>,
963	) -> sp_blockchain::Result<()> {
964		let mut sinks = self.finality_notification_sinks.lock();
965
966		let notification = match notification {
967			Some(notify_finalized) => notify_finalized,
968			None => {
969				// Cleanup any closed finality notification sinks
970				// since we won't be running the loop below which
971				// would also remove any closed sinks.
972				sinks.retain(|sink| !sink.is_closed());
973				return Ok(())
974			},
975		};
976
977		telemetry!(
978			self.telemetry;
979			SUBSTRATE_INFO;
980			"notify.finalized";
981			"height" => format!("{}", notification.header.number()),
982			"best" => ?notification.hash,
983		);
984
985		sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
986
987		Ok(())
988	}
989
990	fn notify_imported(
991		&self,
992		notification: Option<BlockImportNotification<Block>>,
993		import_notification_action: ImportNotificationAction,
994		storage_changes: Option<(StorageCollection, ChildStorageCollection)>,
995	) -> sp_blockchain::Result<()> {
996		let notification = match notification {
997			Some(notify_import) => notify_import,
998			None => {
999				// Cleanup any closed import notification sinks since we won't
1000				// be sending any notifications below which would remove any
1001				// closed sinks. this is necessary since during initial sync we
1002				// won't send any import notifications which could lead to a
1003				// temporary leak of closed/discarded notification sinks (e.g.
1004				// from consensus code).
1005				self.import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1006
1007				self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1008
1009				return Ok(())
1010			},
1011		};
1012
1013		let trigger_storage_changes_notification = || {
1014			if let Some(storage_changes) = storage_changes {
1015				// TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes?
1016				self.storage_notifications.trigger(
1017					&notification.hash,
1018					storage_changes.0.into_iter(),
1019					storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())),
1020				);
1021			}
1022		};
1023
1024		match import_notification_action {
1025			ImportNotificationAction::Both => {
1026				trigger_storage_changes_notification();
1027				self.import_notification_sinks
1028					.lock()
1029					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1030
1031				self.every_import_notification_sinks
1032					.lock()
1033					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1034			},
1035			ImportNotificationAction::RecentBlock => {
1036				trigger_storage_changes_notification();
1037				self.import_notification_sinks
1038					.lock()
1039					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1040
1041				self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1042			},
1043			ImportNotificationAction::EveryBlock => {
1044				self.every_import_notification_sinks
1045					.lock()
1046					.retain(|sink| sink.unbounded_send(notification.clone()).is_ok());
1047
1048				self.import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1049			},
1050			ImportNotificationAction::None => {
1051				// This branch is unreachable in fact because the block import notification must be
1052				// Some(_) instead of None (it's already handled at the beginning of this function)
1053				// at this point.
1054				self.import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1055
1056				self.every_import_notification_sinks.lock().retain(|sink| !sink.is_closed());
1057			},
1058		}
1059
1060		Ok(())
1061	}
1062
1063	/// Attempts to revert the chain by `n` blocks guaranteeing that no block is
1064	/// reverted past the last finalized block. Returns the number of blocks
1065	/// that were successfully reverted.
1066	pub fn revert(&self, n: NumberFor<Block>) -> sp_blockchain::Result<NumberFor<Block>> {
1067		let (number, _) = self.backend.revert(n, false)?;
1068		Ok(number)
1069	}
1070
1071	/// Attempts to revert the chain by `n` blocks disregarding finality. This method will revert
1072	/// any finalized blocks as requested and can potentially leave the node in an inconsistent
1073	/// state. Other modules in the system that persist data and that rely on finality
1074	/// (e.g. consensus parts) will be unaffected by the revert. Use this method with caution and
1075	/// making sure that no other data needs to be reverted for consistency aside from the block
1076	/// data. If `blacklist` is set to true, will also blacklist reverted blocks from finalizing
1077	/// again. The blacklist is reset upon client restart.
1078	///
1079	/// Returns the number of blocks that were successfully reverted.
1080	pub fn unsafe_revert(
1081		&mut self,
1082		n: NumberFor<Block>,
1083		blacklist: bool,
1084	) -> sp_blockchain::Result<NumberFor<Block>> {
1085		let (number, reverted) = self.backend.revert(n, true)?;
1086		if blacklist {
1087			for b in reverted {
1088				self.block_rules.mark_bad(b);
1089			}
1090		}
1091		Ok(number)
1092	}
1093
1094	/// Get blockchain info.
1095	pub fn chain_info(&self) -> BlockchainInfo<Block> {
1096		self.backend.blockchain().info()
1097	}
1098
1099	/// Get block status.
1100	pub fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result<BlockStatus> {
1101		// this can probably be implemented more efficiently
1102		if self
1103			.importing_block
1104			.read()
1105			.as_ref()
1106			.map_or(false, |importing| &hash == importing)
1107		{
1108			return Ok(BlockStatus::Queued)
1109		}
1110
1111		let hash_and_number = self.backend.blockchain().number(hash)?.map(|n| (hash, n));
1112		match hash_and_number {
1113			Some((hash, number)) =>
1114				if self.backend.have_state_at(hash, number) {
1115					Ok(BlockStatus::InChainWithState)
1116				} else {
1117					Ok(BlockStatus::InChainPruned)
1118				},
1119			None => Ok(BlockStatus::Unknown),
1120		}
1121	}
1122
1123	/// Get block header by id.
1124	pub fn header(
1125		&self,
1126		hash: Block::Hash,
1127	) -> sp_blockchain::Result<Option<<Block as BlockT>::Header>> {
1128		self.backend.blockchain().header(hash)
1129	}
1130
1131	/// Get block body by id.
1132	pub fn body(
1133		&self,
1134		hash: Block::Hash,
1135	) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
1136		self.backend.blockchain().body(hash)
1137	}
1138
1139	/// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors.
1140	pub fn uncles(
1141		&self,
1142		target_hash: Block::Hash,
1143		max_generation: NumberFor<Block>,
1144	) -> sp_blockchain::Result<Vec<Block::Hash>> {
1145		let load_header = |hash: Block::Hash| -> sp_blockchain::Result<Block::Header> {
1146			self.backend
1147				.blockchain()
1148				.header(hash)?
1149				.ok_or_else(|| Error::UnknownBlock(format!("{:?}", hash)))
1150		};
1151
1152		let genesis_hash = self.backend.blockchain().info().genesis_hash;
1153		if genesis_hash == target_hash {
1154			return Ok(Vec::new())
1155		}
1156
1157		let mut current_hash = target_hash;
1158		let mut current = load_header(current_hash)?;
1159		let mut ancestor_hash = *current.parent_hash();
1160		let mut ancestor = load_header(ancestor_hash)?;
1161		let mut uncles = Vec::new();
1162
1163		let mut generation: NumberFor<Block> = Zero::zero();
1164		while generation < max_generation {
1165			let children = self.backend.blockchain().children(ancestor_hash)?;
1166			uncles.extend(children.into_iter().filter(|h| h != &current_hash));
1167			current_hash = ancestor_hash;
1168
1169			if genesis_hash == current_hash {
1170				break
1171			}
1172
1173			current = ancestor;
1174			ancestor_hash = *current.parent_hash();
1175			ancestor = load_header(ancestor_hash)?;
1176			generation += One::one();
1177		}
1178		trace!("Collected {} uncles", uncles.len());
1179		Ok(uncles)
1180	}
1181}
1182
1183impl<B, E, Block, RA> UsageProvider<Block> for Client<B, E, Block, RA>
1184where
1185	B: backend::Backend<Block>,
1186	E: CallExecutor<Block>,
1187	Block: BlockT,
1188{
1189	/// Get usage info about current client.
1190	fn usage_info(&self) -> ClientInfo<Block> {
1191		ClientInfo { chain: self.chain_info(), usage: self.backend.usage_info() }
1192	}
1193}
1194
1195impl<B, E, Block, RA> ProofProvider<Block> for Client<B, E, Block, RA>
1196where
1197	B: backend::Backend<Block>,
1198	E: CallExecutor<Block>,
1199	Block: BlockT,
1200{
1201	fn read_proof(
1202		&self,
1203		hash: Block::Hash,
1204		keys: &mut dyn Iterator<Item = &[u8]>,
1205	) -> sp_blockchain::Result<StorageProof> {
1206		self.state_at(hash)
1207			.and_then(|state| prove_read(state, keys).map_err(Into::into))
1208	}
1209
1210	fn read_child_proof(
1211		&self,
1212		hash: Block::Hash,
1213		child_info: &ChildInfo,
1214		keys: &mut dyn Iterator<Item = &[u8]>,
1215	) -> sp_blockchain::Result<StorageProof> {
1216		self.state_at(hash)
1217			.and_then(|state| prove_child_read(state, child_info, keys).map_err(Into::into))
1218	}
1219
1220	fn execution_proof(
1221		&self,
1222		hash: Block::Hash,
1223		method: &str,
1224		call_data: &[u8],
1225	) -> sp_blockchain::Result<(Vec<u8>, StorageProof)> {
1226		self.executor.prove_execution(hash, method, call_data)
1227	}
1228
1229	fn read_proof_collection(
1230		&self,
1231		hash: Block::Hash,
1232		start_key: &[Vec<u8>],
1233		size_limit: usize,
1234	) -> sp_blockchain::Result<(CompactProof, u32)> {
1235		let state = self.state_at(hash)?;
1236		// this is a read proof, using version V0 or V1 is equivalent.
1237		let root = state.storage_root(std::iter::empty(), StateVersion::V0).0;
1238
1239		let (proof, count) = prove_range_read_with_child_with_size::<_, HashingFor<Block>>(
1240			state, size_limit, start_key,
1241		)?;
1242		let proof = proof
1243			.into_compact_proof::<HashingFor<Block>>(root)
1244			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?;
1245		Ok((proof, count))
1246	}
1247
1248	fn storage_collection(
1249		&self,
1250		hash: Block::Hash,
1251		start_key: &[Vec<u8>],
1252		size_limit: usize,
1253	) -> sp_blockchain::Result<Vec<(KeyValueStorageLevel, bool)>> {
1254		if start_key.len() > MAX_NESTED_TRIE_DEPTH {
1255			return Err(Error::Backend("Invalid start key.".to_string()))
1256		}
1257		let state = self.state_at(hash)?;
1258		let child_info = |storage_key: &Vec<u8>| -> sp_blockchain::Result<ChildInfo> {
1259			let storage_key = PrefixedStorageKey::new_ref(storage_key);
1260			match ChildType::from_prefixed_key(storage_key) {
1261				Some((ChildType::ParentKeyId, storage_key)) =>
1262					Ok(ChildInfo::new_default(storage_key)),
1263				None => Err(Error::Backend("Invalid child storage key.".to_string())),
1264			}
1265		};
1266		let mut current_child = if start_key.len() == 2 {
1267			let start_key = start_key.get(0).expect("checked len");
1268			if let Some(child_root) = state
1269				.storage(start_key)
1270				.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1271			{
1272				Some((child_info(start_key)?, child_root))
1273			} else {
1274				return Err(Error::Backend("Invalid root start key.".to_string()))
1275			}
1276		} else {
1277			None
1278		};
1279		let mut current_key = start_key.last().map(Clone::clone).unwrap_or_default();
1280		let mut total_size = 0;
1281		let mut result = vec![(
1282			KeyValueStorageLevel {
1283				state_root: Vec::new(),
1284				key_values: Vec::new(),
1285				parent_storage_keys: Vec::new(),
1286			},
1287			false,
1288		)];
1289
1290		let mut child_roots = HashSet::new();
1291		loop {
1292			let mut entries = Vec::new();
1293			let mut complete = true;
1294			let mut switch_child_key = None;
1295			while let Some(next_key) = if let Some(child) = current_child.as_ref() {
1296				state
1297					.next_child_storage_key(&child.0, &current_key)
1298					.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1299			} else {
1300				state
1301					.next_storage_key(&current_key)
1302					.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1303			} {
1304				let value = if let Some(child) = current_child.as_ref() {
1305					state
1306						.child_storage(&child.0, next_key.as_ref())
1307						.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1308						.unwrap_or_default()
1309				} else {
1310					state
1311						.storage(next_key.as_ref())
1312						.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1313						.unwrap_or_default()
1314				};
1315				let size = value.len() + next_key.len();
1316				if total_size + size > size_limit && !entries.is_empty() {
1317					complete = false;
1318					break
1319				}
1320				total_size += size;
1321
1322				if current_child.is_none() &&
1323					sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) &&
1324					!child_roots.contains(value.as_slice())
1325				{
1326					child_roots.insert(value.clone());
1327					switch_child_key = Some((next_key.clone(), value.clone()));
1328					entries.push((next_key.clone(), value));
1329					break
1330				}
1331				entries.push((next_key.clone(), value));
1332				current_key = next_key;
1333			}
1334			if let Some((child, child_root)) = switch_child_key.take() {
1335				result[0].0.key_values.extend(entries.into_iter());
1336				current_child = Some((child_info(&child)?, child_root));
1337				current_key = Vec::new();
1338			} else if let Some((child, child_root)) = current_child.take() {
1339				current_key = child.into_prefixed_storage_key().into_inner();
1340				result.push((
1341					KeyValueStorageLevel {
1342						state_root: child_root,
1343						key_values: entries,
1344						parent_storage_keys: Vec::new(),
1345					},
1346					complete,
1347				));
1348				if !complete {
1349					break
1350				}
1351			} else {
1352				result[0].0.key_values.extend(entries.into_iter());
1353				result[0].1 = complete;
1354				break
1355			}
1356		}
1357		Ok(result)
1358	}
1359
1360	fn verify_range_proof(
1361		&self,
1362		root: Block::Hash,
1363		proof: CompactProof,
1364		start_key: &[Vec<u8>],
1365	) -> sp_blockchain::Result<(KeyValueStates, usize)> {
1366		let mut db = sp_state_machine::MemoryDB::<HashingFor<Block>>::new(&[]);
1367		// Compact encoding
1368		sp_trie::decode_compact::<sp_state_machine::LayoutV0<HashingFor<Block>>, _, _>(
1369			&mut db,
1370			proof.iter_compact_encoded_nodes(),
1371			Some(&root),
1372		)
1373		.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?;
1374		let proving_backend = sp_state_machine::TrieBackendBuilder::new(db, root).build();
1375		let state = read_range_proof_check_with_child_on_proving_backend::<HashingFor<Block>>(
1376			&proving_backend,
1377			start_key,
1378		)?;
1379
1380		Ok(state)
1381	}
1382}
1383
1384impl<B, E, Block, RA> ExecutorProvider<Block> for Client<B, E, Block, RA>
1385where
1386	B: backend::Backend<Block>,
1387	E: CallExecutor<Block>,
1388	Block: BlockT,
1389{
1390	type Executor = E;
1391
1392	fn executor(&self) -> &Self::Executor {
1393		&self.executor
1394	}
1395
1396	fn execution_extensions(&self) -> &ExecutionExtensions<Block> {
1397		self.executor.execution_extensions()
1398	}
1399}
1400
1401impl<B, E, Block, RA> StorageProvider<Block, B> for Client<B, E, Block, RA>
1402where
1403	B: backend::Backend<Block>,
1404	E: CallExecutor<Block>,
1405	Block: BlockT,
1406{
1407	fn storage_keys(
1408		&self,
1409		hash: <Block as BlockT>::Hash,
1410		prefix: Option<&StorageKey>,
1411		start_key: Option<&StorageKey>,
1412	) -> sp_blockchain::Result<KeysIter<B::State, Block>> {
1413		let state = self.state_at(hash)?;
1414		KeysIter::new(state, prefix, start_key)
1415			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1416	}
1417
1418	fn child_storage_keys(
1419		&self,
1420		hash: <Block as BlockT>::Hash,
1421		child_info: ChildInfo,
1422		prefix: Option<&StorageKey>,
1423		start_key: Option<&StorageKey>,
1424	) -> sp_blockchain::Result<KeysIter<B::State, Block>> {
1425		let state = self.state_at(hash)?;
1426		KeysIter::new_child(state, child_info, prefix, start_key)
1427			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1428	}
1429
1430	fn storage_pairs(
1431		&self,
1432		hash: <Block as BlockT>::Hash,
1433		prefix: Option<&StorageKey>,
1434		start_key: Option<&StorageKey>,
1435	) -> sp_blockchain::Result<PairsIter<B::State, Block>> {
1436		let state = self.state_at(hash)?;
1437		PairsIter::new(state, prefix, start_key)
1438			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1439	}
1440
1441	fn storage(
1442		&self,
1443		hash: Block::Hash,
1444		key: &StorageKey,
1445	) -> sp_blockchain::Result<Option<StorageData>> {
1446		Ok(self
1447			.state_at(hash)?
1448			.storage(&key.0)
1449			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1450			.map(StorageData))
1451	}
1452
1453	fn storage_hash(
1454		&self,
1455		hash: <Block as BlockT>::Hash,
1456		key: &StorageKey,
1457	) -> sp_blockchain::Result<Option<Block::Hash>> {
1458		self.state_at(hash)?
1459			.storage_hash(&key.0)
1460			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1461	}
1462
1463	fn child_storage(
1464		&self,
1465		hash: <Block as BlockT>::Hash,
1466		child_info: &ChildInfo,
1467		key: &StorageKey,
1468	) -> sp_blockchain::Result<Option<StorageData>> {
1469		Ok(self
1470			.state_at(hash)?
1471			.child_storage(child_info, &key.0)
1472			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
1473			.map(StorageData))
1474	}
1475
1476	fn child_storage_hash(
1477		&self,
1478		hash: <Block as BlockT>::Hash,
1479		child_info: &ChildInfo,
1480		key: &StorageKey,
1481	) -> sp_blockchain::Result<Option<Block::Hash>> {
1482		self.state_at(hash)?
1483			.child_storage_hash(child_info, &key.0)
1484			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1485	}
1486
1487	fn closest_merkle_value(
1488		&self,
1489		hash: <Block as BlockT>::Hash,
1490		key: &StorageKey,
1491	) -> blockchain::Result<Option<MerkleValue<<Block as BlockT>::Hash>>> {
1492		self.state_at(hash)?
1493			.closest_merkle_value(&key.0)
1494			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1495	}
1496
1497	fn child_closest_merkle_value(
1498		&self,
1499		hash: <Block as BlockT>::Hash,
1500		child_info: &ChildInfo,
1501		key: &StorageKey,
1502	) -> blockchain::Result<Option<MerkleValue<<Block as BlockT>::Hash>>> {
1503		self.state_at(hash)?
1504			.child_closest_merkle_value(child_info, &key.0)
1505			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
1506	}
1507}
1508
1509impl<B, E, Block, RA> HeaderMetadata<Block> for Client<B, E, Block, RA>
1510where
1511	B: backend::Backend<Block>,
1512	E: CallExecutor<Block>,
1513	Block: BlockT,
1514{
1515	type Error = sp_blockchain::Error;
1516
1517	fn header_metadata(
1518		&self,
1519		hash: Block::Hash,
1520	) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
1521		self.backend.blockchain().header_metadata(hash)
1522	}
1523
1524	fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
1525		self.backend.blockchain().insert_header_metadata(hash, metadata)
1526	}
1527
1528	fn remove_header_metadata(&self, hash: Block::Hash) {
1529		self.backend.blockchain().remove_header_metadata(hash)
1530	}
1531}
1532
1533impl<B, E, Block, RA> ProvideUncles<Block> for Client<B, E, Block, RA>
1534where
1535	B: backend::Backend<Block>,
1536	E: CallExecutor<Block>,
1537	Block: BlockT,
1538{
1539	fn uncles(
1540		&self,
1541		target_hash: Block::Hash,
1542		max_generation: NumberFor<Block>,
1543	) -> sp_blockchain::Result<Vec<Block::Header>> {
1544		Ok(Client::uncles(self, target_hash, max_generation)?
1545			.into_iter()
1546			.filter_map(|hash| Client::header(self, hash).unwrap_or(None))
1547			.collect())
1548	}
1549}
1550
1551impl<B, E, Block, RA> ChainHeaderBackend<Block> for Client<B, E, Block, RA>
1552where
1553	B: backend::Backend<Block>,
1554	E: CallExecutor<Block> + Send + Sync,
1555	Block: BlockT,
1556	RA: Send + Sync,
1557{
1558	fn header(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Block::Header>> {
1559		self.backend.blockchain().header(hash)
1560	}
1561
1562	fn info(&self) -> blockchain::Info<Block> {
1563		self.backend.blockchain().info()
1564	}
1565
1566	fn status(&self, hash: Block::Hash) -> sp_blockchain::Result<blockchain::BlockStatus> {
1567		self.backend.blockchain().status(hash)
1568	}
1569
1570	fn number(
1571		&self,
1572		hash: Block::Hash,
1573	) -> sp_blockchain::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
1574		self.backend.blockchain().number(hash)
1575	}
1576
1577	fn hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
1578		self.backend.blockchain().hash(number)
1579	}
1580}
1581
1582impl<B, E, Block, RA> BlockIdTo<Block> for Client<B, E, Block, RA>
1583where
1584	B: backend::Backend<Block>,
1585	E: CallExecutor<Block> + Send + Sync,
1586	Block: BlockT,
1587	RA: Send + Sync,
1588{
1589	type Error = Error;
1590
1591	fn to_hash(&self, block_id: &BlockId<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
1592		self.block_hash_from_id(block_id)
1593	}
1594
1595	fn to_number(
1596		&self,
1597		block_id: &BlockId<Block>,
1598	) -> sp_blockchain::Result<Option<NumberFor<Block>>> {
1599		self.block_number_from_id(block_id)
1600	}
1601}
1602
1603impl<B, E, Block, RA> ChainHeaderBackend<Block> for &Client<B, E, Block, RA>
1604where
1605	B: backend::Backend<Block>,
1606	E: CallExecutor<Block> + Send + Sync,
1607	Block: BlockT,
1608	RA: Send + Sync,
1609{
1610	fn header(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Block::Header>> {
1611		self.backend.blockchain().header(hash)
1612	}
1613
1614	fn info(&self) -> blockchain::Info<Block> {
1615		self.backend.blockchain().info()
1616	}
1617
1618	fn status(&self, hash: Block::Hash) -> sp_blockchain::Result<blockchain::BlockStatus> {
1619		(**self).status(hash)
1620	}
1621
1622	fn number(
1623		&self,
1624		hash: Block::Hash,
1625	) -> sp_blockchain::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
1626		(**self).number(hash)
1627	}
1628
1629	fn hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
1630		(**self).hash(number)
1631	}
1632}
1633
1634impl<B, E, Block, RA> ProvideRuntimeApi<Block> for Client<B, E, Block, RA>
1635where
1636	B: backend::Backend<Block>,
1637	E: CallExecutor<Block, Backend = B> + Send + Sync,
1638	Block: BlockT,
1639	RA: ConstructRuntimeApi<Block, Self> + Send + Sync,
1640{
1641	type Api = <RA as ConstructRuntimeApi<Block, Self>>::RuntimeApi;
1642
1643	fn runtime_api(&self) -> ApiRef<'_, Self::Api> {
1644		RA::construct_runtime_api(self)
1645	}
1646}
1647
1648impl<B, E, Block, RA> CallApiAt<Block> for Client<B, E, Block, RA>
1649where
1650	B: backend::Backend<Block>,
1651	E: CallExecutor<Block, Backend = B> + Send + Sync,
1652	Block: BlockT,
1653	RA: Send + Sync,
1654{
1655	type StateBackend = B::State;
1656
1657	fn call_api_at(&self, params: CallApiAtParams<Block>) -> Result<Vec<u8>, sp_api::ApiError> {
1658		self.executor
1659			.contextual_call(
1660				params.at,
1661				params.function,
1662				&params.arguments,
1663				params.overlayed_changes,
1664				params.recorder,
1665				params.call_context,
1666				params.extensions,
1667			)
1668			.map_err(Into::into)
1669	}
1670
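	/// A small sketch (assuming `client` and a known block hash `at` are in scope): fetch the
	/// runtime version active at that block without going through the typed runtime API.
	///
	/// ```ignore
	/// use sp_api::CallApiAt;
	///
	/// let version = client.runtime_version_at(at)?;
	/// log::info!("{} v{}", version.spec_name, version.spec_version);
	/// ```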
1671	fn runtime_version_at(&self, hash: Block::Hash) -> Result<RuntimeVersion, sp_api::ApiError> {
1672		CallExecutor::runtime_version(&self.executor, hash).map_err(Into::into)
1673	}
1674
1675	fn state_at(&self, at: Block::Hash) -> Result<Self::StateBackend, sp_api::ApiError> {
1676		self.state_at(at).map_err(Into::into)
1677	}
1678
1679	fn initialize_extensions(
1680		&self,
1681		at: Block::Hash,
1682		extensions: &mut sp_externalities::Extensions,
1683	) -> Result<(), sp_api::ApiError> {
1684		let block_number = self.expect_block_number_from_id(&BlockId::Hash(at))?;
1685
1686		extensions.merge(self.executor.execution_extensions().extensions(at, block_number));
1687
1688		Ok(())
1689	}
1690}
1691
1692/// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport
1693/// objects. Otherwise, importing blocks directly into the client would bypass
1694/// important verification work.
1695#[async_trait::async_trait]
1696impl<B, E, Block, RA> sc_consensus::BlockImport<Block> for &Client<B, E, Block, RA>
1697where
1698	B: backend::Backend<Block>,
1699	E: CallExecutor<Block> + Send + Sync,
1700	Block: BlockT,
1701	Client<B, E, Block, RA>: ProvideRuntimeApi<Block>,
1702	<Client<B, E, Block, RA> as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
1703	RA: Sync + Send,
1704{
1705	type Error = ConsensusError;
1706
1707	/// Import a checked and validated block.
1708	///
1709	/// NOTE: only use this implementation when there are NO consensus-level BlockImport
1710	/// objects. Otherwise, importing blocks directly into the client would bypass
1711	/// important verification work.
1712	///
1713	/// If you are not sure that there are no BlockImport objects provided by the consensus
1714	/// algorithm, don't use this function.
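	///
	/// A heavily simplified sketch of a direct import (assuming `client`, a `header` and its
	/// `extrinsics` are already in scope and the caveat above is satisfied):
	///
	/// ```ignore
	/// use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction};
	/// use sp_consensus::BlockOrigin;
	///
	/// let mut params = BlockImportParams::new(BlockOrigin::Own, header);
	/// params.body = Some(extrinsics);
	/// params.state_action = StateAction::Execute;
	/// params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
	/// let result = client.import_block(params).await?;
	/// ```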
1715	async fn import_block(
1716		&self,
1717		mut import_block: BlockImportParams<Block>,
1718	) -> Result<ImportResult, Self::Error> {
1719		let span = tracing::span!(tracing::Level::DEBUG, "import_block");
1720		let _enter = span.enter();
1721
1722		let storage_changes =
1723			match self.prepare_block_storage_changes(&mut import_block).map_err(|e| {
1724				warn!("Block prepare storage changes error: {}", e);
1725				ConsensusError::ClientImport(e.to_string())
1726			})? {
1727				PrepareStorageChangesResult::Discard(res) => return Ok(res),
1728				PrepareStorageChangesResult::Import(storage_changes) => storage_changes,
1729			};
1730
1731		self.lock_import_and_run(|operation| {
1732			self.apply_block(operation, import_block, storage_changes)
1733		})
1734		.map_err(|e| {
1735			warn!("Block import error: {}", e);
1736			ConsensusError::ClientImport(e.to_string())
1737		})
1738	}
1739
1740	/// Check block preconditions.
1741	async fn check_block(
1742		&self,
1743		block: BlockCheckParams<Block>,
1744	) -> Result<ImportResult, Self::Error> {
1745		let BlockCheckParams {
1746			hash,
1747			number,
1748			parent_hash,
1749			allow_missing_state,
1750			import_existing,
1751			allow_missing_parent,
1752		} = block;
1753
1754		// Check the block against the white and black lists, if any are defined
1755		// (i.e. expected fork blocks and known bad blocks, respectively).
1756		match self.block_rules.lookup(number, &hash) {
1757			BlockLookupResult::KnownBad => {
1758				trace!("Rejecting known bad block: #{} {:?}", number, hash);
1759				return Ok(ImportResult::KnownBad)
1760			},
1761			BlockLookupResult::Expected(expected_hash) => {
1762				trace!(
1763					"Rejecting block from known invalid fork. Got {:?}, expected: {:?} at height {}",
1764					hash,
1765					expected_hash,
1766					number
1767				);
1768				return Ok(ImportResult::KnownBad)
1769			},
1770			BlockLookupResult::NotSpecial => {},
1771		}
1772
1773		// Our own status must be checked first. If the block and its ancestry are pruned,
1774		// this function must return `AlreadyInChain` rather than `MissingState`.
1775		match self
1776			.block_status(hash)
1777			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
1778		{
1779			BlockStatus::InChainWithState | BlockStatus::Queued =>
1780				return Ok(ImportResult::AlreadyInChain),
1781			BlockStatus::InChainPruned if !import_existing =>
1782				return Ok(ImportResult::AlreadyInChain),
1783			BlockStatus::InChainPruned => {},
1784			BlockStatus::Unknown => {},
1785			BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
1786		}
1787
1788		match self
1789			.block_status(parent_hash)
1790			.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
1791		{
1792			BlockStatus::InChainWithState | BlockStatus::Queued => {},
1793			BlockStatus::Unknown if allow_missing_parent => {},
1794			BlockStatus::Unknown => return Ok(ImportResult::UnknownParent),
1795			BlockStatus::InChainPruned if allow_missing_state => {},
1796			BlockStatus::InChainPruned => return Ok(ImportResult::MissingState),
1797			BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
1798		}
1799
1800		Ok(ImportResult::imported(false))
1801	}
1802}
1803
1804#[async_trait::async_trait]
1805impl<B, E, Block, RA> sc_consensus::BlockImport<Block> for Client<B, E, Block, RA>
1806where
1807	B: backend::Backend<Block>,
1808	E: CallExecutor<Block> + Send + Sync,
1809	Block: BlockT,
1810	Self: ProvideRuntimeApi<Block>,
1811	<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
1812	RA: Sync + Send,
1813{
1814	type Error = ConsensusError;
1815
1816	async fn check_block(
1817		&self,
1818		block: BlockCheckParams<Block>,
1819	) -> Result<ImportResult, Self::Error> {
1820		(&self).check_block(block).await
1821	}
1822
1823	async fn import_block(
1824		&self,
1825		import_block: BlockImportParams<Block>,
1826	) -> Result<ImportResult, Self::Error> {
1827		(&self).import_block(import_block).await
1828	}
1829}
1830
1831impl<B, E, Block, RA> Finalizer<Block, B> for Client<B, E, Block, RA>
1832where
1833	B: backend::Backend<Block>,
1834	E: CallExecutor<Block>,
1835	Block: BlockT,
1836{
1837	fn apply_finality(
1838		&self,
1839		operation: &mut ClientImportOperation<Block, B>,
1840		hash: Block::Hash,
1841		justification: Option<Justification>,
1842		notify: bool,
1843	) -> sp_blockchain::Result<()> {
1844		let info = self.backend.blockchain().info();
1845		self.apply_finality_with_block_hash(operation, hash, justification, &info, notify)
1846	}
1847
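	/// A short sketch (assuming `client` and a finalized-candidate `hash` are in scope;
	/// passing `None` finalizes the block without attaching a justification):
	///
	/// ```ignore
	/// client.finalize_block(hash, None, true)?;
	/// ```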
1848	fn finalize_block(
1849		&self,
1850		hash: Block::Hash,
1851		justification: Option<Justification>,
1852		notify: bool,
1853	) -> sp_blockchain::Result<()> {
1854		self.lock_import_and_run(|operation| {
1855			self.apply_finality(operation, hash, justification, notify)
1856		})
1857	}
1858}
1859
1860impl<B, E, Block, RA> Finalizer<Block, B> for &Client<B, E, Block, RA>
1861where
1862	B: backend::Backend<Block>,
1863	E: CallExecutor<Block>,
1864	Block: BlockT,
1865{
1866	fn apply_finality(
1867		&self,
1868		operation: &mut ClientImportOperation<Block, B>,
1869		hash: Block::Hash,
1870		justification: Option<Justification>,
1871		notify: bool,
1872	) -> sp_blockchain::Result<()> {
1873		(**self).apply_finality(operation, hash, justification, notify)
1874	}
1875
1876	fn finalize_block(
1877		&self,
1878		hash: Block::Hash,
1879		justification: Option<Justification>,
1880		notify: bool,
1881	) -> sp_blockchain::Result<()> {
1882		(**self).finalize_block(hash, justification, notify)
1883	}
1884}
1885
1886impl<B, E, Block, RA> PreCommitActions<Block> for Client<B, E, Block, RA>
1887where
1888	Block: BlockT,
1889{
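	/// A hedged sketch of registering an import hook (this assumes `OnImportAction` is a boxed
	/// closure from the import notification to a list of auxiliary key/value writes, and that
	/// `Encode` from `parity-scale-codec` is available):
	///
	/// ```ignore
	/// use codec::Encode;
	///
	/// client.register_import_action(Box::new(|notification| {
	/// 	// Record the hash of every imported block as auxiliary data.
	/// 	vec![(b"example/last_imported".to_vec(), Some(notification.hash.encode()))]
	/// }));
	/// ```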
1890	fn register_import_action(&self, action: OnImportAction<Block>) {
1891		self.import_actions.lock().push(action);
1892	}
1893
1894	fn register_finality_action(&self, action: OnFinalityAction<Block>) {
1895		self.finality_actions.lock().push(action);
1896	}
1897}
1898
1899impl<B, E, Block, RA> BlockchainEvents<Block> for Client<B, E, Block, RA>
1900where
1901	E: CallExecutor<Block>,
1902	Block: BlockT,
1903{
1904	/// Get block import event stream.
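	///
	/// A usage sketch (assuming `client` is in scope and the consumer runs inside an async
	/// task):
	///
	/// ```ignore
	/// use futures::StreamExt;
	///
	/// let mut imports = client.import_notification_stream();
	/// while let Some(notification) = imports.next().await {
	/// 	log::info!("Imported {:?} (new best: {})", notification.hash, notification.is_new_best);
	/// }
	/// ```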
1905	fn import_notification_stream(&self) -> ImportNotifications<Block> {
1906		let (sink, stream) = tracing_unbounded("mpsc_import_notification_stream", 100_000);
1907		self.import_notification_sinks.lock().push(sink);
1908		stream
1909	}
1910
1911	fn every_import_notification_stream(&self) -> ImportNotifications<Block> {
1912		let (sink, stream) = tracing_unbounded("mpsc_every_import_notification_stream", 100_000);
1913		self.every_import_notification_sinks.lock().push(sink);
1914		stream
1915	}
1916
1917	fn finality_notification_stream(&self) -> FinalityNotifications<Block> {
1918		let (sink, stream) = tracing_unbounded("mpsc_finality_notification_stream", 100_000);
1919		self.finality_notification_sinks.lock().push(sink);
1920		stream
1921	}
1922
1923	/// Get storage changes event stream.
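	///
	/// An illustrative sketch (assuming `client` is in scope; the key below is just one
	/// example of filtering on a single well-known storage key):
	///
	/// ```ignore
	/// use futures::StreamExt;
	/// use sp_core::storage::{well_known_keys, StorageKey};
	///
	/// let code_key = StorageKey(well_known_keys::CODE.to_vec());
	/// let mut changes = client.storage_changes_notification_stream(Some(&[code_key]), None)?;
	/// while let Some(notification) = changes.next().await {
	/// 	log::info!("`:code` changed in block {:?}", notification.block);
	/// }
	/// ```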
1924	fn storage_changes_notification_stream(
1925		&self,
1926		filter_keys: Option<&[StorageKey]>,
1927		child_filter_keys: Option<&[(StorageKey, Option<Vec<StorageKey>>)]>,
1928	) -> sp_blockchain::Result<StorageEventStream<Block::Hash>> {
1929		Ok(self.storage_notifications.listen(filter_keys, child_filter_keys))
1930	}
1931}
1932
1933impl<B, E, Block, RA> BlockBackend<Block> for Client<B, E, Block, RA>
1934where
1935	B: backend::Backend<Block>,
1936	E: CallExecutor<Block>,
1937	Block: BlockT,
1938{
1939	fn block_body(
1940		&self,
1941		hash: Block::Hash,
1942	) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
1943		self.body(hash)
1944	}
1945
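	/// A small sketch of assembling a full block (assuming `client` and a block `hash` are in
	/// scope; `None` is returned when either the header or the body is missing):
	///
	/// ```ignore
	/// if let Some(signed) = client.block(hash)? {
	/// 	let (header, extrinsics) = signed.block.deconstruct();
	/// 	log::info!("Block {:?} carries {} extrinsics", hash, extrinsics.len());
	/// }
	/// ```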
1946	fn block(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<SignedBlock<Block>>> {
1947		Ok(match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) {
1948			(Some(header), Some(extrinsics), justifications) =>
1949				Some(SignedBlock { block: Block::new(header, extrinsics), justifications }),
1950			_ => None,
1951		})
1952	}
1953
1954	fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result<BlockStatus> {
1955		Client::block_status(self, hash)
1956	}
1957
1958	fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Justifications>> {
1959		self.backend.blockchain().justifications(hash)
1960	}
1961
1962	fn block_hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>> {
1963		self.backend.blockchain().hash(number)
1964	}
1965
1966	fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Vec<u8>>> {
1967		self.backend.blockchain().indexed_transaction(hash)
1968	}
1969
1970	fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result<bool> {
1971		self.backend.blockchain().has_indexed_transaction(hash)
1972	}
1973
1974	fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Vec<Vec<u8>>>> {
1975		self.backend.blockchain().block_indexed_body(hash)
1976	}
1977
1978	fn requires_full_sync(&self) -> bool {
1979		self.backend.requires_full_sync()
1980	}
1981}
1982
1983impl<B, E, Block, RA> backend::AuxStore for Client<B, E, Block, RA>
1984where
1985	B: backend::Backend<Block>,
1986	E: CallExecutor<Block>,
1987	Block: BlockT,
1988	Self: ProvideRuntimeApi<Block>,
1989	<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block>,
1990{
1991	/// Insert auxiliary data into the key-value store.
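	///
	/// A minimal sketch (assuming `client` is in scope; keys are arbitrary byte strings, and
	/// the module-style prefix below is only a naming convention used for illustration):
	///
	/// ```ignore
	/// // Write one key and delete another in a single locked operation.
	/// client.insert_aux(&[(&b"example/key"[..], &b"value"[..])], &[&b"example/stale"[..]])?;
	/// assert_eq!(client.get_aux(b"example/key")?, Some(b"value".to_vec()));
	/// ```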
1992	fn insert_aux<
1993		'a,
1994		'b: 'a,
1995		'c: 'a,
1996		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
1997		D: IntoIterator<Item = &'a &'b [u8]>,
1998	>(
1999		&self,
2000		insert: I,
2001		delete: D,
2002	) -> sp_blockchain::Result<()> {
2003		// Import is locked here because we may have other block import
2004		// operations that try to set aux data. Note that the consensus
2005		// layer can always use atomic operations to make sure the
2006		// import lock is only taken once.
2007		self.lock_import_and_run(|operation| apply_aux(operation, insert, delete))
2008	}
2009	/// Query auxiliary data from the key-value store.
2010	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
2011		backend::AuxStore::get_aux(&*self.backend, key)
2012	}
2013}
2014
2015impl<B, E, Block, RA> backend::AuxStore for &Client<B, E, Block, RA>
2016where
2017	B: backend::Backend<Block>,
2018	E: CallExecutor<Block>,
2019	Block: BlockT,
2020	Client<B, E, Block, RA>: ProvideRuntimeApi<Block>,
2021	<Client<B, E, Block, RA> as ProvideRuntimeApi<Block>>::Api: CoreApi<Block>,
2022{
2023	fn insert_aux<
2024		'a,
2025		'b: 'a,
2026		'c: 'a,
2027		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
2028		D: IntoIterator<Item = &'a &'b [u8]>,
2029	>(
2030		&self,
2031		insert: I,
2032		delete: D,
2033	) -> sp_blockchain::Result<()> {
2034		(**self).insert_aux(insert, delete)
2035	}
2036
2037	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
2038		(**self).get_aux(key)
2039	}
2040}
2041
2042impl<BE, E, B, RA> sp_consensus::block_validation::Chain<B> for Client<BE, E, B, RA>
2043where
2044	BE: backend::Backend<B>,
2045	E: CallExecutor<B>,
2046	B: BlockT,
2047{
2048	fn block_status(
2049		&self,
2050		hash: B::Hash,
2051	) -> Result<BlockStatus, Box<dyn std::error::Error + Send>> {
2052		Client::block_status(self, hash).map_err(|e| Box::new(e) as Box<_>)
2053	}
2054}
2055
2056impl<BE, E, B, RA> sp_transaction_storage_proof::IndexedBody<B> for Client<BE, E, B, RA>
2057where
2058	BE: backend::Backend<B>,
2059	E: CallExecutor<B>,
2060	B: BlockT,
2061{
2062	fn block_indexed_body(
2063		&self,
2064		number: NumberFor<B>,
2065	) -> Result<Option<Vec<Vec<u8>>>, sp_transaction_storage_proof::Error> {
2066		let hash = match self
2067			.backend
2068			.blockchain()
2069			.block_hash_from_id(&BlockId::Number(number))
2070			.map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))?
2071		{
2072			Some(hash) => hash,
2073			None => return Ok(None),
2074		};
2075
2076		self.backend
2077			.blockchain()
2078			.block_indexed_body(hash)
2079			.map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))
2080	}
2081
2082	fn number(
2083		&self,
2084		hash: B::Hash,
2085	) -> Result<Option<NumberFor<B>>, sp_transaction_storage_proof::Error> {
2086		self.backend
2087			.blockchain()
2088			.number(hash)
2089			.map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))
2090	}
2091}