// sc_client_db/lib.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! Client backend that is backed by a database.
20//!
21//! # Canonicality vs. Finality
22//!
23//! Finality indicates that a block will not be reverted, according to the consensus algorithm,
24//! while canonicality indicates that the block may be reverted, but we will be unable to do so,
25//! having discarded heavy state that will allow a chain reorganization.
26//!
27//! Finality implies canonicality but not vice-versa.
28
29#![warn(missing_docs)]
30
31pub mod offchain;
32
33pub mod bench;
34
35mod children;
36mod parity_db;
37mod pinned_blocks_cache;
38mod record_stats_state;
39mod stats;
40#[cfg(any(feature = "rocksdb", test))]
41mod upgrade;
42mod utils;
43
44use linked_hash_map::LinkedHashMap;
45use log::{debug, trace, warn};
46use parking_lot::{Mutex, RwLock};
47use prometheus_endpoint::Registry;
48use std::{
49	collections::{HashMap, HashSet},
50	io,
51	path::{Path, PathBuf},
52	sync::Arc,
53};
54
55use crate::{
56	pinned_blocks_cache::PinnedBlocksCache,
57	record_stats_state::RecordStatsState,
58	stats::StateUsageStats,
59	utils::{meta_keys, read_db, read_meta, remove_from_db, DatabaseType, Meta},
60};
61use codec::{Decode, Encode};
62use hash_db::Prefix;
63use sc_client_api::{
64	backend::NewBlockState,
65	blockchain::{BlockGap, BlockGapType},
66	leaves::{FinalizationOutcome, LeafSet},
67	utils::is_descendent_of,
68	IoInfo, MemoryInfo, MemorySize, TrieCacheContext, UsageInfo,
69};
70use sc_state_db::{IsPruned, LastCanonicalized, StateDb};
71use sp_arithmetic::traits::Saturating;
72use sp_blockchain::{
73	Backend as _, CachedHeaderMetadata, DisplacedLeavesAfterFinalization, Error as ClientError,
74	HeaderBackend, HeaderMetadata, HeaderMetadataCache, Result as ClientResult,
75};
76use sp_core::{
77	offchain::OffchainOverlayedChange,
78	storage::{well_known_keys, ChildInfo},
79};
80use sp_database::Transaction;
81use sp_runtime::{
82	generic::BlockId,
83	traits::{
84		Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One, SaturatedConversion,
85		Zero,
86	},
87	Justification, Justifications, StateVersion, Storage,
88};
89use sp_state_machine::{
90	backend::{AsTrieBackend, Backend as StateBackend},
91	BackendTransaction, ChildStorageCollection, DBValue, IndexOperation, IterArgs,
92	OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, StorageKey,
93	StorageValue, UsageInfo as StateUsageInfo,
94};
95use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB};
96use utils::BLOCK_GAP_CURRENT_VERSION;
97
98// Re-export the Database trait so that one can pass an implementation of it.
99pub use sc_state_db::PruningMode;
100pub use sp_database::Database;
101
102pub use bench::BenchmarkingState;
103
/// Filter to determine if a block should be excluded from pruning.
///
/// Note: This filter only affects **block body** (and future header) pruning.
/// It does **not** affect state pruning, which is configured separately.
///
/// Any `Fn(&Justifications) -> bool + Send + Sync` closure implements this
/// trait through a blanket implementation, so plain closures can be used.
pub trait PruningFilter: Send + Sync {
	/// Check if a block with the given justifications should be preserved.
	///
	/// Returns `true` to preserve the block, `false` to allow pruning.
	fn should_retain(&self, justifications: &Justifications) -> bool;
}
114
115impl<F> PruningFilter for F
116where
117	F: Fn(&Justifications) -> bool + Send + Sync,
118{
119	fn should_retain(&self, justifications: &Justifications) -> bool {
120		(self)(justifications)
121	}
122}
123
/// Capacity of the in-memory LRU header cache used by `BlockchainDb::header`.
const CACHE_HEADERS: usize = 8;

/// DB-backed patricia trie state, transaction type is an overlay of changes to commit.
pub type DbState<H> = sp_state_machine::TrieBackend<Arc<dyn sp_state_machine::Storage<H>>, H>;

/// Builder for [`DbState`].
pub type DbStateBuilder<Hasher> =
	sp_state_machine::TrieBackendBuilder<Arc<dyn sp_state_machine::Storage<Hasher>>, Hasher>;

/// Length of a [`DbHash`] in bytes.
const DB_HASH_LEN: usize = 32;

/// Hash type that this backend uses for the database.
pub type DbHash = sp_core::H256;
138
/// An extrinsic entry in the database.
#[derive(Debug, Encode, Decode)]
enum DbExtrinsic<B: BlockT> {
	/// Extrinsic that contains indexed data.
	///
	/// The indexed payload itself is stored separately in
	/// [`columns::TRANSACTION`] under `hash`; decoding re-joins `header` with
	/// that payload (see `BlockchainDb::body_uncached`).
	Indexed {
		/// Hash of the indexed part.
		hash: DbHash,
		/// Extrinsic header.
		header: Vec<u8>,
	},
	/// Complete extrinsic data.
	Full(B::Extrinsic),
}
152
/// A reference tracking state.
///
/// It makes sure that the hash we are using stays pinned in storage
/// until this structure is dropped.
pub struct RefTrackingState<Block: BlockT> {
	/// The underlying trie-backed state; all reads delegate to it.
	state: DbState<HashingFor<Block>>,
	/// Storage handle used to unpin `parent_hash` when this value is dropped.
	storage: Arc<StorageDb<Block>>,
	/// The hash kept pinned in the state db while this value is alive.
	parent_hash: Option<Block::Hash>,
}
162
163impl<B: BlockT> RefTrackingState<B> {
164	fn new(
165		state: DbState<HashingFor<B>>,
166		storage: Arc<StorageDb<B>>,
167		parent_hash: Option<B::Hash>,
168	) -> Self {
169		RefTrackingState { state, parent_hash, storage }
170	}
171}
172
173impl<B: BlockT> Drop for RefTrackingState<B> {
174	fn drop(&mut self) {
175		if let Some(hash) = &self.parent_hash {
176			self.storage.state_db.unpin(hash);
177		}
178	}
179}
180
181impl<Block: BlockT> std::fmt::Debug for RefTrackingState<Block> {
182	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
183		write!(f, "Block {:?}", self.parent_hash)
184	}
185}
186
/// A raw iterator over the `RefTrackingState`.
pub struct RawIter<B: BlockT> {
	/// The raw iterator of the wrapped [`DbState`]; all calls delegate to it.
	inner: <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::RawIter,
}
191
// All methods forward to the inner iterator, passing the wrapped `DbState`
// (`backend.state`) instead of the `RefTrackingState` wrapper itself.
impl<B: BlockT> StorageIterator<HashingFor<B>> for RawIter<B> {
	type Backend = RefTrackingState<B>;
	type Error = <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::Error;

	fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
		self.inner.next_key(&backend.state)
	}

	fn next_pair(
		&mut self,
		backend: &Self::Backend,
	) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
		self.inner.next_pair(&backend.state)
	}

	fn was_complete(&self) -> bool {
		self.inner.was_complete()
	}
}
211
// `RefTrackingState` only adds pinning bookkeeping around `DbState`; every
// `StateBackend` operation below is forwarded verbatim to the wrapped state.
impl<B: BlockT> StateBackend<HashingFor<B>> for RefTrackingState<B> {
	type Error = <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::Error;
	type TrieBackendStorage =
		<DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
	type RawIter = RawIter<B>;

	fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.storage(key)
	}

	fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
		self.state.storage_hash(key)
	}

	fn child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.child_storage(child_info, key)
	}

	fn child_storage_hash(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<B::Hash>, Self::Error> {
		self.state.child_storage_hash(child_info, key)
	}

	fn closest_merkle_value(
		&self,
		key: &[u8],
	) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
		self.state.closest_merkle_value(key)
	}

	fn child_closest_merkle_value(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
		self.state.child_closest_merkle_value(child_info, key)
	}

	fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
		self.state.exists_storage(key)
	}

	fn exists_child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<bool, Self::Error> {
		self.state.exists_child_storage(child_info, key)
	}

	fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_storage_key(key)
	}

	fn next_child_storage_key(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_child_storage_key(child_info, key)
	}

	fn storage_root<'a>(
		&self,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
		state_version: StateVersion,
	) -> (B::Hash, BackendTransaction<HashingFor<B>>) {
		self.state.storage_root(delta, state_version)
	}

	fn child_storage_root<'a>(
		&self,
		child_info: &ChildInfo,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
		state_version: StateVersion,
	) -> (B::Hash, bool, BackendTransaction<HashingFor<B>>) {
		self.state.child_storage_root(child_info, delta, state_version)
	}

	fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
		// Wrap the inner iterator so callers drive it through `RefTrackingState`.
		self.state.raw_iter(args).map(|inner| RawIter { inner })
	}

	fn register_overlay_stats(&self, stats: &StateMachineStats) {
		self.state.register_overlay_stats(stats);
	}

	fn usage_info(&self) -> StateUsageInfo {
		self.state.usage_info()
	}
}
310
311impl<B: BlockT> AsTrieBackend<HashingFor<B>> for RefTrackingState<B> {
312	type TrieBackendStorage =
313		<DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
314
315	fn as_trie_backend(
316		&self,
317	) -> &sp_state_machine::TrieBackend<Self::TrieBackendStorage, HashingFor<B>> {
318		&self.state.as_trie_backend()
319	}
320}
321
/// Database settings.
pub struct DatabaseSettings {
	/// The maximum trie cache size in bytes.
	///
	/// If `None` is given, the cache is disabled.
	pub trie_cache_maximum_size: Option<usize>,
	/// Requested state pruning mode.
	///
	/// Independent of `blocks_pruning`: state and block bodies are pruned separately.
	pub state_pruning: Option<PruningMode>,
	/// Where to find the database.
	pub source: DatabaseSource,
	/// Block pruning mode.
	///
	/// NOTE: only finalized blocks are subject for removal!
	pub blocks_pruning: BlocksPruning,
	/// Filters to exclude blocks from pruning.
	///
	/// If any filter returns `true` for a block's justifications, the block body
	/// (and in the future, the header) will be preserved even when it falls
	/// outside the pruning window. Does not affect state pruning.
	pub pruning_filters: Vec<Arc<dyn PruningFilter>>,
	/// Prometheus metrics registry.
	pub metrics_registry: Option<Registry>,
}
345
/// Block pruning settings.
///
/// Controls retention of block bodies; state pruning is configured
/// separately via [`PruningMode`].
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BlocksPruning {
	/// Keep full block history, of every block that was ever imported.
	KeepAll,
	/// Keep full finalized block history.
	KeepFinalized,
	/// Keep N recent finalized blocks.
	Some(u32),
}
356
357impl BlocksPruning {
358	/// True if this is an archive pruning mode (either KeepAll or KeepFinalized).
359	pub fn is_archive(&self) -> bool {
360		match *self {
361			BlocksPruning::KeepAll | BlocksPruning::KeepFinalized => true,
362			BlocksPruning::Some(_) => false,
363		}
364	}
365}
366
/// Where to find the database.
#[derive(Debug, Clone)]
pub enum DatabaseSource {
	/// Check given path, and see if there is an existing database there. If it's either `RocksDb`
	/// or `ParityDb`, use it. If there is none, create a new instance of `ParityDb`.
	Auto {
		/// Path to the paritydb database.
		paritydb_path: PathBuf,
		/// Path to the rocksdb database.
		rocksdb_path: PathBuf,
		/// Cache size in MiB. Used only by `RocksDb` variant of `DatabaseSource`.
		cache_size: usize,
	},
	/// Load a RocksDB database from a given path. Recommended for most uses.
	#[cfg(feature = "rocksdb")]
	RocksDb {
		/// Path to the database.
		path: PathBuf,
		/// Cache size in MiB.
		cache_size: usize,
	},

	/// Load a ParityDb database from a given path.
	ParityDb {
		/// Path to the database.
		path: PathBuf,
	},

	/// Use a custom already-open database.
	Custom {
		/// The handle to the custom storage.
		db: Arc<dyn Database<DbHash>>,

		/// If set, the `create` flag will be required to open such a datasource.
		require_create_flag: bool,
	},
}
404
405impl DatabaseSource {
406	/// Return path for databases that are stored on disk.
407	pub fn path(&self) -> Option<&Path> {
408		match self {
409			// as per https://github.com/paritytech/substrate/pull/9500#discussion_r684312550
410			//
411			// IIUC this is needed for polkadot to create its own dbs, so until it can use parity db
412			// I would think rocksdb, but later parity-db.
413			DatabaseSource::Auto { paritydb_path, .. } => Some(paritydb_path),
414			#[cfg(feature = "rocksdb")]
415			DatabaseSource::RocksDb { path, .. } => Some(path),
416			DatabaseSource::ParityDb { path } => Some(path),
417			DatabaseSource::Custom { .. } => None,
418		}
419	}
420
421	/// Set path for databases that are stored on disk.
422	pub fn set_path(&mut self, p: &Path) -> bool {
423		match self {
424			DatabaseSource::Auto { ref mut paritydb_path, .. } => {
425				*paritydb_path = p.into();
426				true
427			},
428			#[cfg(feature = "rocksdb")]
429			DatabaseSource::RocksDb { ref mut path, .. } => {
430				*path = p.into();
431				true
432			},
433			DatabaseSource::ParityDb { ref mut path } => {
434				*path = p.into();
435				true
436			},
437			DatabaseSource::Custom { .. } => false,
438		}
439	}
440}
441
442impl std::fmt::Display for DatabaseSource {
443	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
444		let name = match self {
445			DatabaseSource::Auto { .. } => "Auto",
446			#[cfg(feature = "rocksdb")]
447			DatabaseSource::RocksDb { .. } => "RocksDb",
448			DatabaseSource::ParityDb { .. } => "ParityDb",
449			DatabaseSource::Custom { .. } => "Custom",
450		};
451		write!(f, "{}", name)
452	}
453}
454
/// Database column ids used by this backend.
///
/// NOTE(review): ids 0 (aside from `META`), 7 and 10 are absent here —
/// presumably retired columns; confirm before reusing them.
pub(crate) mod columns {
	/// Chain metadata (shared constant with `utils`).
	pub const META: u32 = crate::utils::COLUMN_META;
	/// State trie nodes.
	pub const STATE: u32 = 1;
	/// Metadata for `sc_state_db` (see `StateMetaDb`).
	pub const STATE_META: u32 = 2;
	/// maps hashes to lookup keys and numbers to canon hashes.
	pub const KEY_LOOKUP: u32 = 3;
	/// Block headers.
	pub const HEADER: u32 = 4;
	/// Block bodies (plain encoded extrinsic lists).
	pub const BODY: u32 = 5;
	/// Justifications for finalized blocks.
	pub const JUSTIFICATIONS: u32 = 6;
	/// Auxiliary data.
	pub const AUX: u32 = 8;
	/// Offchain workers local storage
	pub const OFFCHAIN: u32 = 9;
	/// Transactions
	pub const TRANSACTION: u32 = 11;
	/// Per-block list of [`DbExtrinsic`] entries (indexed body layout).
	pub const BODY_INDEX: u32 = 12;
}
471
/// A block staged for import via `set_block_data`, together with everything
/// needed to write it in the commit step.
struct PendingBlock<Block: BlockT> {
	header: Block::Header,
	justifications: Option<Justifications>,
	body: Option<Vec<Block::Extrinsic>>,
	/// Indexed transaction payloads accompanying the body, if any.
	indexed_body: Option<Vec<Vec<u8>>>,
	/// Import state of the block (best/final/…) — see [`NewBlockState`].
	leaf_state: NewBlockState,
	/// Whether to register the block in the leaf set.
	register_as_leaf: bool,
}
480
/// Wrapper implementing the metadata-access trait required by `sc_state_db`
/// on top of the shared database handle (reads go to [`columns::STATE_META`]).
#[derive(Clone)]
struct StateMetaDb(Arc<dyn Database<DbHash>>);
484
485impl sc_state_db::MetaDb for StateMetaDb {
486	type Error = sp_database::error::DatabaseError;
487
488	fn get_meta(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
489		Ok(self.0.get(columns::STATE_META, key))
490	}
491}
492
/// An in-memory meta change, consumed by `BlockchainDb::update_meta`.
struct MetaUpdate<Block: BlockT> {
	pub hash: Block::Hash,
	pub number: NumberFor<Block>,
	/// Make this block the best block.
	pub is_best: bool,
	/// Make this block the finalized block.
	pub is_finalized: bool,
	/// Whether state is available for this block (recorded as `finalized_state`
	/// when `is_finalized` is also set).
	pub with_state: bool,
}
500
/// Insert `header` (possibly `None`, caching a known-absent header) into the
/// bounded header cache, evicting entries beyond [`CACHE_HEADERS`].
///
/// Eviction is LRU: lookups go through `get_refresh` (see
/// `BlockchainDb::header`), which moves accessed entries to the back of the
/// `LinkedHashMap`, so `pop_front` removes the least recently used entry.
fn cache_header<Hash: std::cmp::Eq + std::hash::Hash, Header>(
	cache: &mut LinkedHashMap<Hash, Option<Header>>,
	hash: Hash,
	header: Option<Header>,
) {
	cache.insert(hash, header);
	while cache.len() > CACHE_HEADERS {
		cache.pop_front();
	}
}
511
/// Block database
pub struct BlockchainDb<Block: BlockT> {
	/// Shared handle to the underlying key-value database.
	db: Arc<dyn Database<DbHash>>,
	/// In-memory chain meta (best/finalized/genesis hashes, block gap).
	meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
	/// Current set of chain leaves, restored from `meta_keys::LEAF_PREFIX`.
	leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
	/// Cache of header metadata keyed by block hash.
	header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
	/// Small LRU cache of full headers (bounded by [`CACHE_HEADERS`]).
	header_cache: Mutex<LinkedHashMap<Block::Hash, Option<Block::Header>>>,
	/// Bodies/justifications kept alive for pinned blocks.
	pinned_blocks_cache: Arc<RwLock<PinnedBlocksCache<Block>>>,
}
521
impl<Block: BlockT> BlockchainDb<Block> {
	/// Open the blockchain database, restoring the meta information and the
	/// leaf set from the underlying storage.
	fn new(db: Arc<dyn Database<DbHash>>) -> ClientResult<Self> {
		let meta = read_meta::<Block>(&*db, columns::HEADER)?;
		let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
		Ok(BlockchainDb {
			db,
			leaves: RwLock::new(leaves),
			meta: Arc::new(RwLock::new(meta)),
			header_metadata_cache: Arc::new(HeaderMetadataCache::default()),
			header_cache: Default::default(),
			pinned_blocks_cache: Arc::new(RwLock::new(PinnedBlocksCache::new())),
		})
	}

	/// Apply a [`MetaUpdate`] to the in-memory meta under the write lock.
	fn update_meta(&self, update: MetaUpdate<Block>) {
		let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update;
		let mut meta = self.meta.write();
		// Block number zero is by definition the genesis block.
		if number.is_zero() {
			meta.genesis_hash = hash;
		}

		if is_best {
			meta.best_number = number;
			meta.best_hash = hash;
		}

		if is_finalized {
			if with_state {
				meta.finalized_state = Some((hash, number));
			}
			meta.finalized_number = number;
			meta.finalized_hash = hash;
		}
	}

	/// Record the current block gap (`None` clears it).
	fn update_block_gap(&self, gap: Option<BlockGap<NumberFor<Block>>>) {
		let mut meta = self.meta.write();
		meta.block_gap = gap;
	}

	/// Empty the cache of pinned items.
	fn clear_pinning_cache(&self) {
		self.pinned_blocks_cache.write().clear();
	}

	/// Load a justification into the cache of pinned items.
	/// Reference count of the item will not be increased. Use this
	/// to load values for items into the cache which have already been pinned.
	fn insert_justifications_if_pinned(&self, hash: Block::Hash, justification: Justification) {
		let mut cache = self.pinned_blocks_cache.write();
		// Not pinned — nothing to keep alive.
		if !cache.contains(hash) {
			return;
		}

		let justifications = Justifications::from(justification);
		cache.insert_justifications(hash, Some(justifications));
	}

	/// Load a justification from the db into the cache of pinned items.
	/// Reference count of the item will not be increased. Use this
	/// to load values for items into the cache which have already been pinned.
	fn insert_persisted_justifications_if_pinned(&self, hash: Block::Hash) -> ClientResult<()> {
		let mut cache = self.pinned_blocks_cache.write();
		if !cache.contains(hash) {
			return Ok(());
		}

		let justifications = self.justifications_uncached(hash)?;
		cache.insert_justifications(hash, justifications);
		Ok(())
	}

	/// Load a block body from the db into the cache of pinned items.
	/// Reference count of the item will not be increased. Use this
	/// to load values for items into the cache which have already been pinned.
	fn insert_persisted_body_if_pinned(&self, hash: Block::Hash) -> ClientResult<()> {
		let mut cache = self.pinned_blocks_cache.write();
		if !cache.contains(hash) {
			return Ok(());
		}

		let body = self.body_uncached(hash)?;
		cache.insert_body(hash, body);
		Ok(())
	}

	/// Bump reference count for pinned item.
	fn bump_ref(&self, hash: Block::Hash) {
		self.pinned_blocks_cache.write().pin(hash);
	}

	/// Decrease reference count for pinned item and remove if reference count is 0.
	fn unpin(&self, hash: Block::Hash) {
		self.pinned_blocks_cache.write().unpin(hash);
	}

	/// Read and decode the justifications for `hash` straight from the
	/// database, bypassing the pinned-blocks cache.
	fn justifications_uncached(&self, hash: Block::Hash) -> ClientResult<Option<Justifications>> {
		match read_db(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::JUSTIFICATIONS,
			BlockId::<Block>::Hash(hash),
		)? {
			Some(justifications) => match Decode::decode(&mut &justifications[..]) {
				Ok(justifications) => Ok(Some(justifications)),
				Err(err) => {
					return Err(sp_blockchain::Error::Backend(format!(
						"Error decoding justifications: {err}"
					)))
				},
			},
			None => Ok(None),
		}
	}

	/// Read and decode the block body for `hash` straight from the database,
	/// bypassing the pinned-blocks cache.
	///
	/// A body is stored either as a plain encoded extrinsic list in
	/// [`columns::BODY`], or as a [`DbExtrinsic`] list in
	/// [`columns::BODY_INDEX`] whose indexed payloads live in
	/// [`columns::TRANSACTION`].
	fn body_uncached(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
		if let Some(body) =
			read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::<Block>(hash))?
		{
			// Plain body
			match Decode::decode(&mut &body[..]) {
				Ok(body) => return Ok(Some(body)),
				Err(err) => {
					return Err(sp_blockchain::Error::Backend(format!(
						"Error decoding body: {err}"
					)))
				},
			}
		}

		if let Some(index) = read_db(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::BODY_INDEX,
			BlockId::Hash::<Block>(hash),
		)? {
			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
				Ok(index) => {
					let mut body = Vec::new();
					for ex in index {
						match ex {
							DbExtrinsic::Indexed { hash, header } => {
								match self.db.get(columns::TRANSACTION, hash.as_ref()) {
									Some(t) => {
										// Re-assemble the extrinsic from its header and
										// the separately stored indexed payload.
										let mut input =
											utils::join_input(header.as_ref(), t.as_ref());
										let ex = Block::Extrinsic::decode(&mut input).map_err(
											|err| {
												sp_blockchain::Error::Backend(format!(
													"Error decoding indexed extrinsic: {err}"
												))
											},
										)?;
										body.push(ex);
									},
									None => {
										return Err(sp_blockchain::Error::Backend(format!(
											"Missing indexed transaction {hash:?}"
										)))
									},
								};
							},
							DbExtrinsic::Full(ex) => {
								body.push(ex);
							},
						}
					}
					return Ok(Some(body));
				},
				Err(err) => {
					return Err(sp_blockchain::Error::Backend(format!(
						"Error decoding body list: {err}",
					)))
				},
			}
		}
		Ok(None)
	}
}
701
impl<Block: BlockT> sc_client_api::blockchain::HeaderBackend<Block> for BlockchainDb<Block> {
	/// Get the header for `hash`, consulting the small LRU header cache first.
	fn header(&self, hash: Block::Hash) -> ClientResult<Option<Block::Header>> {
		let mut cache = self.header_cache.lock();
		// `get_refresh` also marks the entry as most recently used.
		if let Some(result) = cache.get_refresh(&hash) {
			return Ok(result.clone());
		}
		let header = utils::read_header(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::HEADER,
			BlockId::<Block>::Hash(hash),
		)?;
		// Cache the lookup result, including negative results (`None`).
		cache_header(&mut cache, hash, header.clone());
		Ok(header)
	}

	/// Snapshot of chain info taken from the in-memory meta.
	fn info(&self) -> sc_client_api::blockchain::Info<Block> {
		let meta = self.meta.read();
		sc_client_api::blockchain::Info {
			best_hash: meta.best_hash,
			best_number: meta.best_number,
			genesis_hash: meta.genesis_hash,
			finalized_hash: meta.finalized_hash,
			finalized_number: meta.finalized_number,
			finalized_state: meta.finalized_state,
			number_leaves: self.leaves.read().count(),
			block_gap: meta.block_gap,
		}
	}

	/// A block is `InChain` iff its header is present in the database.
	fn status(&self, hash: Block::Hash) -> ClientResult<sc_client_api::blockchain::BlockStatus> {
		match self.header(hash)?.is_some() {
			true => Ok(sc_client_api::blockchain::BlockStatus::InChain),
			false => Ok(sc_client_api::blockchain::BlockStatus::Unknown),
		}
	}

	fn number(&self, hash: Block::Hash) -> ClientResult<Option<NumberFor<Block>>> {
		// Unknown blocks map to `Ok(None)` rather than an error.
		Ok(self.header_metadata(hash).ok().map(|header_metadata| header_metadata.number))
	}

	fn hash(&self, number: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
		Ok(utils::read_header::<Block>(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::HEADER,
			BlockId::Number(number),
		)?
		.map(|header| header.hash()))
	}
}
753
impl<Block: BlockT> sc_client_api::blockchain::Backend<Block> for BlockchainDb<Block> {
	/// Get the block body, preferring the pinned-blocks cache over the db.
	fn body(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
		let cache = self.pinned_blocks_cache.read();
		if let Some(result) = cache.body(&hash) {
			return Ok(result.clone());
		}

		self.body_uncached(hash)
	}

	/// Get the justifications, preferring the pinned-blocks cache over the db.
	fn justifications(&self, hash: Block::Hash) -> ClientResult<Option<Justifications>> {
		let cache = self.pinned_blocks_cache.read();
		if let Some(result) = cache.justifications(&hash) {
			return Ok(result.clone());
		}

		self.justifications_uncached(hash)
	}

	fn last_finalized(&self) -> ClientResult<Block::Hash> {
		Ok(self.meta.read().finalized_hash)
	}

	fn leaves(&self) -> ClientResult<Vec<Block::Hash>> {
		Ok(self.leaves.read().hashes())
	}

	fn children(&self, parent_hash: Block::Hash) -> ClientResult<Vec<Block::Hash>> {
		children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash)
	}

	fn indexed_transaction(&self, hash: Block::Hash) -> ClientResult<Option<Vec<u8>>> {
		Ok(self.db.get(columns::TRANSACTION, hash.as_ref()))
	}

	fn has_indexed_transaction(&self, hash: Block::Hash) -> ClientResult<bool> {
		Ok(self.db.contains(columns::TRANSACTION, hash.as_ref()))
	}

	/// Collect the indexed payloads referenced by the block's body index.
	/// Returns `Ok(None)` when the block has no body index at all.
	fn block_indexed_body(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Vec<u8>>>> {
		let body = match read_db(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::BODY_INDEX,
			BlockId::<Block>::Hash(hash),
		)? {
			Some(body) => body,
			None => return Ok(None),
		};
		match Vec::<DbExtrinsic<Block>>::decode(&mut &body[..]) {
			Ok(index) => {
				let mut transactions = Vec::new();
				for ex in index.into_iter() {
					// Only indexed entries reference a separately stored payload.
					if let DbExtrinsic::Indexed { hash, .. } = ex {
						match self.db.get(columns::TRANSACTION, hash.as_ref()) {
							Some(t) => transactions.push(t),
							None => {
								return Err(sp_blockchain::Error::Backend(format!(
									"Missing indexed transaction {hash:?}",
								)))
							},
						}
					}
				}
				Ok(Some(transactions))
			},
			Err(err) => {
				Err(sp_blockchain::Error::Backend(format!("Error decoding body list: {err}")))
			},
		}
	}
}
826
impl<Block: BlockT> HeaderMetadata<Block> for BlockchainDb<Block> {
	type Error = sp_blockchain::Error;

	/// Get header metadata for `hash`, filling the metadata cache from the
	/// full header on a cache miss.
	fn header_metadata(
		&self,
		hash: Block::Hash,
	) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
		// Cache hit: return it via `Ok`. Miss: load the header, derive and
		// cache its metadata, or fail with `UnknownBlock` if absent.
		self.header_metadata_cache.header_metadata(hash).map_or_else(
			|| {
				self.header(hash)?
					.map(|header| {
						let header_metadata = CachedHeaderMetadata::from(&header);
						self.header_metadata_cache
							.insert_header_metadata(header_metadata.hash, header_metadata.clone());
						header_metadata
					})
					.ok_or_else(|| {
						ClientError::UnknownBlock(format!(
							"Header was not found in the database: {hash:?}",
						))
					})
			},
			Ok,
		)
	}

	fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
		self.header_metadata_cache.insert_header_metadata(hash, metadata)
	}

	/// Evict `hash` from both the header cache and the metadata cache.
	fn remove_header_metadata(&self, hash: Block::Hash) {
		self.header_cache.lock().remove(&hash);
		self.header_metadata_cache.remove_header_metadata(hash);
	}
}
862
/// Database transaction
pub struct BlockImportOperation<Block: BlockT> {
	/// State at the parent of the block being imported.
	old_state: RecordStatsState<RefTrackingState<Block>, Block>,
	/// Staged trie-node changes (overlay produced by the storage-root computation).
	db_updates: PrefixedMemoryDB<HashingFor<Block>>,
	/// Staged top-level key/value changes.
	storage_updates: StorageCollection,
	/// Staged child-trie key/value changes.
	child_storage_updates: ChildStorageCollection,
	/// Staged offchain storage changes.
	offchain_storage_updates: OffchainChangesCollection,
	/// The block staged via `set_block_data`, if any.
	pending_block: Option<PendingBlock<Block>>,
	/// Auxiliary key/value operations (`None` value means removal).
	aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
	/// Blocks to mark finalized, with optional justifications.
	finalized_blocks: Vec<(Block::Hash, Option<Justification>)>,
	/// New best block to set, if any.
	set_head: Option<Block::Hash>,
	/// Whether the staged state changes should be committed.
	commit_state: bool,
	// Block-gap creation flag set by `set_create_gap`; consumed at commit
	// time, outside this chunk — NOTE(review): confirm exact semantics there.
	create_gap: bool,
	/// Set when `reset_storage` replaced the whole state.
	reset_storage: bool,
	/// Transaction-index operations to apply.
	index_ops: Vec<IndexOperation>,
}
879
impl<Block: BlockT> BlockImportOperation<Block> {
	/// Drain the staged offchain changes into `transaction`
	/// (writes and removals against the `OFFCHAIN` column).
	fn apply_offchain(&mut self, transaction: &mut Transaction<DbHash>) {
		let mut count = 0;
		for ((prefix, key), value_operation) in self.offchain_storage_updates.drain(..) {
			count += 1;
			let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key);
			match value_operation {
				OffchainOverlayedChange::SetValue(val) => {
					transaction.set_from_vec(columns::OFFCHAIN, &key, val)
				},
				OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key),
			}
		}

		if count > 0 {
			log::debug!(target: "sc_offchain", "Applied {count} offchain indexing changes.");
		}
	}

	/// Drain the staged auxiliary operations into `transaction`.
	fn apply_aux(&mut self, transaction: &mut Transaction<DbHash>) {
		for (key, maybe_val) in self.aux_ops.drain(..) {
			match maybe_val {
				// `Some` sets the value, `None` deletes the key.
				Some(val) => transaction.set_from_vec(columns::AUX, &key, val),
				None => transaction.remove(columns::AUX, &key),
			}
		}
	}

	/// Compute the full storage root of `storage` over the current state and
	/// stage the resulting trie transaction in `db_updates`.
	///
	/// Fails with [`sp_blockchain::Error::InvalidState`] if the top trie
	/// contains child-storage keys.
	fn apply_new_state(
		&mut self,
		storage: Storage,
		state_version: StateVersion,
	) -> ClientResult<Block::Hash> {
		if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(k)) {
			return Err(sp_blockchain::Error::InvalidState);
		}

		let child_delta = storage.children_default.values().map(|child_content| {
			(
				&child_content.child_info,
				child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
			)
		});

		let (root, transaction) = self.old_state.full_storage_root(
			storage.top.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
			child_delta,
			state_version,
		);

		self.db_updates = transaction;
		Ok(root)
	}
}
934
impl<Block: BlockT> sc_client_api::backend::BlockImportOperation<Block>
	for BlockImportOperation<Block>
{
	type State = RecordStatsState<RefTrackingState<Block>, Block>;

	fn state(&self) -> ClientResult<Option<&Self::State>> {
		Ok(Some(&self.old_state))
	}

	/// Stage the block's data; only a single block per operation is supported.
	fn set_block_data(
		&mut self,
		header: Block::Header,
		body: Option<Vec<Block::Extrinsic>>,
		indexed_body: Option<Vec<Vec<u8>>>,
		justifications: Option<Justifications>,
		leaf_state: NewBlockState,
		register_as_leaf: bool,
	) -> ClientResult<()> {
		assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
		self.pending_block = Some(PendingBlock {
			header,
			body,
			indexed_body,
			justifications,
			leaf_state,
			register_as_leaf,
		});
		Ok(())
	}

	/// Replace the staged trie-node overlay wholesale.
	fn update_db_storage(
		&mut self,
		update: PrefixedMemoryDB<HashingFor<Block>>,
	) -> ClientResult<()> {
		self.db_updates = update;
		Ok(())
	}

	/// Replace the entire state with `storage` and mark it for commit.
	fn reset_storage(
		&mut self,
		storage: Storage,
		state_version: StateVersion,
	) -> ClientResult<Block::Hash> {
		let root = self.apply_new_state(storage, state_version)?;
		self.commit_state = true;
		self.reset_storage = true;
		Ok(root)
	}

	/// Stage the genesis state; `commit` decides whether it is persisted.
	fn set_genesis_state(
		&mut self,
		storage: Storage,
		commit: bool,
		state_version: StateVersion,
	) -> ClientResult<Block::Hash> {
		let root = self.apply_new_state(storage, state_version)?;
		self.commit_state = commit;
		Ok(root)
	}

	fn insert_aux<I>(&mut self, ops: I) -> ClientResult<()>
	where
		I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
	{
		// Appends: repeated calls accumulate rather than overwrite.
		self.aux_ops.append(&mut ops.into_iter().collect());
		Ok(())
	}

	fn update_storage(
		&mut self,
		update: StorageCollection,
		child_update: ChildStorageCollection,
	) -> ClientResult<()> {
		self.storage_updates = update;
		self.child_storage_updates = child_update;
		Ok(())
	}

	fn update_offchain_storage(
		&mut self,
		offchain_update: OffchainChangesCollection,
	) -> ClientResult<()> {
		self.offchain_storage_updates = offchain_update;
		Ok(())
	}

	fn mark_finalized(
		&mut self,
		block: Block::Hash,
		justification: Option<Justification>,
	) -> ClientResult<()> {
		self.finalized_blocks.push((block, justification));
		Ok(())
	}

	/// Record the new head; only one head update per operation is supported.
	fn mark_head(&mut self, hash: Block::Hash) -> ClientResult<()> {
		assert!(self.set_head.is_none(), "Only one set head per operation is allowed");
		self.set_head = Some(hash);
		Ok(())
	}

	fn update_transaction_index(&mut self, index_ops: Vec<IndexOperation>) -> ClientResult<()> {
		self.index_ops = index_ops;
		Ok(())
	}

	fn set_create_gap(&mut self, create_gap: bool) {
		self.create_gap = create_gap;
	}
}
1045
/// State storage: the shared key-value database paired with a pruning-aware
/// `StateDb` layered on top of it.
struct StorageDb<Block: BlockT> {
	// Handle to the underlying key-value database.
	pub db: Arc<dyn Database<DbHash>>,
	// Pruning-aware state database writing through to `db`.
	pub state_db: StateDb<Block::Hash, Vec<u8>, StateMetaDb>,
	// When `true`, trie node keys are prefixed before lookup — set when the
	// database does not support reference counting (see `from_database`).
	prefix_keys: bool,
}
1051
1052impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for StorageDb<Block> {
1053	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
1054		if self.prefix_keys {
1055			let key = prefixed_key::<HashingFor<Block>>(key, prefix);
1056			self.state_db.get(&key, self)
1057		} else {
1058			self.state_db.get(key.as_ref(), self)
1059		}
1060		.map_err(|e| format!("Database backend error: {e:?}"))
1061	}
1062}
1063
impl<Block: BlockT> sc_state_db::NodeDb for StorageDb<Block> {
	type Error = io::Error;
	type Key = [u8];

	// Raw read from the STATE column; a missing key surfaces as `Ok(None)`.
	fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		Ok(self.db.get(columns::STATE, key))
	}
}
1072
/// In-memory genesis state, used when the genesis state is not committed to
/// disk (see `try_commit_operation`).
struct DbGenesisStorage<Block: BlockT> {
	// State root of the genesis block.
	root: Block::Hash,
	// The trie nodes backing the genesis state.
	storage: PrefixedMemoryDB<HashingFor<Block>>,
}
1077
impl<Block: BlockT> DbGenesisStorage<Block> {
	/// Wrap a precomputed genesis state `root` and its backing trie node `storage`.
	pub fn new(root: Block::Hash, storage: PrefixedMemoryDB<HashingFor<Block>>) -> Self {
		DbGenesisStorage { root, storage }
	}
}
1083
impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for DbGenesisStorage<Block> {
	// Serve trie node lookups straight from the in-memory genesis node set.
	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
		use hash_db::HashDB;
		Ok(self.storage.get(key, prefix))
	}
}
1090
/// Storage for an empty state: holds only the empty-trie root hash.
struct EmptyStorage<Block: BlockT>(pub Block::Hash);
1092
1093impl<Block: BlockT> EmptyStorage<Block> {
1094	pub fn new() -> Self {
1095		let mut root = Block::Hash::default();
1096		let mut mdb = MemoryDB::<HashingFor<Block>>::default();
1097		// both triedbmut are the same on empty storage.
1098		sp_trie::trie_types::TrieDBMutBuilderV1::<HashingFor<Block>>::new(&mut mdb, &mut root)
1099			.build();
1100		EmptyStorage(root)
1101	}
1102}
1103
impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for EmptyStorage<Block> {
	// An empty state contains no trie nodes, so every lookup misses.
	fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result<Option<DBValue>, String> {
		Ok(None)
	}
}
1109
/// Frozen `value` at time `at`.
///
/// Used as inner structure under lock in `FrozenForDuration`.
struct Frozen<T: Clone> {
	// Instant at which `value` was last refreshed.
	at: std::time::Instant,
	// The cached value; `None` until first computed.
	value: Option<T>,
}
1117
/// Some value frozen for period of time.
///
/// If time `duration` not passed since the value was instantiated,
/// current frozen value is returned. Otherwise, you have to provide
/// a new value which will be again frozen for `duration`.
pub(crate) struct FrozenForDuration<T: Clone> {
	// How long a computed value stays fresh.
	duration: std::time::Duration,
	// Cached value together with its refresh timestamp, guarded by a mutex so
	// concurrent callers observe a consistent (at, value) pair.
	value: parking_lot::Mutex<Frozen<T>>,
}
1127
1128impl<T: Clone> FrozenForDuration<T> {
1129	fn new(duration: std::time::Duration) -> Self {
1130		Self { duration, value: Frozen { at: std::time::Instant::now(), value: None }.into() }
1131	}
1132
1133	fn take_or_else<F>(&self, f: F) -> T
1134	where
1135		F: FnOnce() -> T,
1136	{
1137		let mut lock = self.value.lock();
1138		let now = std::time::Instant::now();
1139		match lock.value.as_ref() {
1140			Some(value) if now.saturating_duration_since(lock.at) <= self.duration => value.clone(),
1141			_ => {
1142				let new_value = f();
1143				lock.at = now;
1144				lock.value = Some(new_value.clone());
1145				new_value
1146			},
1147		}
1148	}
1149}
1150
/// Disk backend.
///
/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all
/// blocks. Otherwise, trie nodes are kept only from some recent blocks.
pub struct Backend<Block: BlockT> {
	// State storage: shared database handle plus the pruning-aware state db.
	storage: Arc<StorageDb<Block>>,
	// Offchain worker local storage, backed by the same database.
	offchain_storage: offchain::LocalStorage,
	// Headers, bodies, justifications, leaves and chain metadata.
	blockchain: BlockchainDb<Block>,
	// How many blocks behind best a block must be before it is force-canonicalized
	// (see `force_delayed_canonicalize`).
	canonicalization_delay: u64,
	// Lock shared with the client — presumably held for the duration of block
	// import; usage sites are outside this chunk.
	import_lock: Arc<RwLock<()>>,
	// `true` when the state pruning mode is an archive mode.
	is_archive: bool,
	// Block body pruning configuration.
	blocks_pruning: BlocksPruning,
	// I/O + state-usage statistics, refreshed at most once per freeze window.
	io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>,
	// Accumulated state read/write statistics.
	state_usage: Arc<StateUsageStats>,
	// In-memory genesis state, populated when the genesis state is not committed
	// to disk (see `try_commit_operation`).
	genesis_state: RwLock<Option<Arc<DbGenesisStorage<Block>>>>,
	// Optional trie cache shared between state instances.
	shared_trie_cache: Option<sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
	// Filters consulted when pruning, taken from `DatabaseSettings`.
	pruning_filters: Vec<Arc<dyn PruningFilter>>,
}
1169
1170impl<Block: BlockT> Backend<Block> {
1171	/// Create a new instance of database backend.
1172	///
1173	/// The pruning window is how old a block must be before the state is pruned.
1174	pub fn new(db_config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult<Self> {
1175		use utils::OpenDbError;
1176
1177		let db_source = &db_config.source;
1178
1179		let (needs_init, db) =
1180			match crate::utils::open_database::<Block>(db_source, DatabaseType::Full, false) {
1181				Ok(db) => (false, db),
1182				Err(OpenDbError::DoesNotExist) => {
1183					let db =
1184						crate::utils::open_database::<Block>(db_source, DatabaseType::Full, true)?;
1185					(true, db)
1186				},
1187				Err(as_is) => return Err(as_is.into()),
1188			};
1189
1190		Self::from_database(db as Arc<_>, canonicalization_delay, &db_config, needs_init)
1191	}
1192
	/// Reset the shared trie cache.
	///
	/// No-op when the backend was configured without a trie cache.
	pub fn reset_trie_cache(&self) {
		if let Some(cache) = &self.shared_trie_cache {
			cache.reset();
		}
	}
1199
	/// Create new memory-backed client backend for tests.
	///
	/// Keeps the state of the most recent `blocks_pruning` blocks
	/// (`BlocksPruning::Some`).
	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self {
		Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay)
	}
1205
	/// Create new memory-backed client backend for tests with custom pruning filters.
	///
	/// Like [`Self::new_test`], but lets the caller supply `pruning_filters`.
	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test_with_pruning_filters(
		blocks_pruning: u32,
		canonicalization_delay: u64,
		pruning_filters: Vec<Arc<dyn PruningFilter>>,
	) -> Self {
		Self::new_test_with_tx_storage_and_filters(
			BlocksPruning::Some(blocks_pruning),
			canonicalization_delay,
			pruning_filters,
		)
	}
1219
	/// Create new memory-backed client backend for tests.
	///
	/// Accepts the full `BlocksPruning` mode and uses no pruning filters.
	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test_with_tx_storage(
		blocks_pruning: BlocksPruning,
		canonicalization_delay: u64,
	) -> Self {
		Self::new_test_with_tx_storage_and_filters(
			blocks_pruning,
			canonicalization_delay,
			Default::default(),
		)
	}
1232
1233	/// Create new memory-backed client backend for tests with custom pruning filters.
1234	#[cfg(any(test, feature = "test-helpers"))]
1235	pub fn new_test_with_tx_storage_and_filters(
1236		blocks_pruning: BlocksPruning,
1237		canonicalization_delay: u64,
1238		pruning_filters: Vec<Arc<dyn PruningFilter>>,
1239	) -> Self {
1240		let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS);
1241		let db = sp_database::as_database(db);
1242		let state_pruning = match blocks_pruning {
1243			BlocksPruning::KeepAll => PruningMode::ArchiveAll,
1244			BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical,
1245			BlocksPruning::Some(n) => PruningMode::blocks_pruning(n),
1246		};
1247		let db_setting = DatabaseSettings {
1248			trie_cache_maximum_size: Some(16 * 1024 * 1024),
1249			state_pruning: Some(state_pruning),
1250			source: DatabaseSource::Custom { db, require_create_flag: true },
1251			blocks_pruning,
1252			pruning_filters,
1253			metrics_registry: None,
1254		};
1255
1256		Self::new(db_setting, canonicalization_delay).expect("failed to create test-db")
1257	}
1258
	/// Expose the Database that is used by this backend.
	/// The second argument is the Column that stores the State.
	///
	/// Should only be needed for benchmarking.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_db(&self) -> (Arc<dyn sp_database::Database<DbHash>>, sp_database::ColumnId) {
		// Cloning the `Arc` only bumps the refcount; the database is shared.
		(self.storage.db.clone(), columns::STATE)
	}
1267
	/// Expose the Storage that is used by this backend.
	///
	/// Should only be needed for benchmarking.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_storage(&self) -> Arc<dyn sp_state_machine::Storage<HashingFor<Block>>> {
		// `StorageDb` implements `sp_state_machine::Storage`; hand out a shared handle.
		self.storage.clone()
	}
1275
	/// Expose the shared trie cache that is used by this backend.
	///
	/// Returns `None` when the backend was configured without a trie cache.
	/// Should only be needed for benchmarking.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_shared_trie_cache(
		&self,
	) -> Option<sp_trie::cache::SharedTrieCache<HashingFor<Block>>> {
		self.shared_trie_cache.clone()
	}
1285
	/// Build the backend around an already-open database handle.
	///
	/// `should_init` indicates a freshly-created database whose state-db
	/// metadata still needs to be written. All initialization writes are
	/// batched into one transaction and committed at the end.
	fn from_database(
		db: Arc<dyn Database<DbHash>>,
		canonicalization_delay: u64,
		config: &DatabaseSettings,
		should_init: bool,
	) -> ClientResult<Self> {
		let mut db_init_transaction = Transaction::new();

		let requested_state_pruning = config.state_pruning.clone();
		let state_meta_db = StateMetaDb(db.clone());
		let map_e = sp_blockchain::Error::from_state_db;

		// Opening the state db may produce an initial commit set (e.g. when
		// `should_init` is set); it is folded into the init transaction below.
		let (state_db_init_commit_set, state_db) = StateDb::open(
			state_meta_db,
			requested_state_pruning,
			!db.supports_ref_counting(),
			should_init,
		)
		.map_err(map_e)?;

		apply_state_commit(&mut db_init_transaction, state_db_init_commit_set);

		let state_pruning_used = state_db.pruning_mode();
		let is_archive_pruning = state_pruning_used.is_archive();
		let blockchain = BlockchainDb::new(db.clone())?;

		// Databases without ref-counting need prefixed trie node keys.
		let storage_db =
			StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() };

		let offchain_storage = offchain::LocalStorage::new(db.clone());

		// Optionally set up the shared trie cache, warning when the requested
		// size exceeds the memory currently available on the system.
		let shared_trie_cache = config.trie_cache_maximum_size.map(|maximum_size| {
			let system_memory = sysinfo::System::new_all();
			let used_memory = system_memory.used_memory();
			let total_memory = system_memory.total_memory();

			debug!("Initializing shared trie cache with size {} bytes, {}% of total memory", maximum_size, (maximum_size as f64 / total_memory as f64 * 100.0));
			if maximum_size as u64 > total_memory - used_memory {
				warn!(
					"Not enough memory to initialize shared trie cache. Cache size: {} bytes. System memory: used {} bytes, total {} bytes",
					maximum_size, used_memory, total_memory,
				);
			}

			SharedTrieCache::new(sp_trie::cache::CacheSize::new(maximum_size), config.metrics_registry.as_ref())
		});

		let backend = Backend {
			storage: Arc::new(storage_db),
			offchain_storage,
			blockchain,
			canonicalization_delay,
			import_lock: Default::default(),
			is_archive: is_archive_pruning,
			io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)),
			state_usage: Arc::new(StateUsageStats::new()),
			blocks_pruning: config.blocks_pruning,
			genesis_state: RwLock::new(None),
			shared_trie_cache,
			pruning_filters: config.pruning_filters.clone(),
		};

		// Older DB versions have no last state key. Check if the state is available and set it.
		let info = backend.blockchain.info();
		if info.finalized_state.is_none() &&
			info.finalized_hash != Default::default() &&
			sc_client_api::Backend::have_state_at(
				&backend,
				info.finalized_hash,
				info.finalized_number,
			) {
			backend.blockchain.update_meta(MetaUpdate {
				hash: info.finalized_hash,
				number: info.finalized_number,
				is_best: info.finalized_hash == info.best_hash,
				is_finalized: true,
				with_state: true,
			});
		}

		// Non archive nodes cannot fill the missing block gap with bodies.
		// If the gap is present, it means that every restart will try to fill the gap:
		// - a block request is made for each and every block in the gap
		// - the request is fulfilled putting pressure on the network and other nodes
		// - upon receiving the block, the block cannot be executed since the state
		//  of the parent block might have been discarded
		// - then the sync engine closes the gap in memory, but never in DB.
		//
		// This leads to inefficient syncing and high CPU usage on every restart. To mitigate this,
		// remove the gap from the DB if we detect it and the current node is not an archive.
		match (backend.is_archive, info.block_gap) {
			(false, Some(gap)) if matches!(gap.gap_type, BlockGapType::MissingBody) => {
				warn!(
					"Detected a missing body gap for non-archive nodes. Removing the gap={:?}",
					gap
				);

				db_init_transaction.remove(columns::META, meta_keys::BLOCK_GAP);
				db_init_transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
				backend.blockchain.update_block_gap(None);
			},
			_ => {},
		}

		// Single atomic commit of all initialization writes.
		db.commit(db_init_transaction)?;

		Ok(backend)
	}
1394
	/// Handle setting head within a transaction. `route_to` should be the last
	/// block that existed in the database. `best_to` should be the best block
	/// to be set.
	///
	/// In the case where the new best block is a block to be imported, `route_to`
	/// should be the parent of `best_to`. In the case where we set an existing block
	/// to be best, `route_to` should equal to `best_to`.
	///
	/// Returns the `(enacted, retracted)` block hashes along the tree route
	/// from the old best to the new one.
	fn set_head_with_transaction(
		&self,
		transaction: &mut Transaction<DbHash>,
		route_to: Block::Hash,
		best_to: (NumberFor<Block>, Block::Hash),
	) -> ClientResult<(Vec<Block::Hash>, Vec<Block::Hash>)> {
		let mut enacted = Vec::default();
		let mut retracted = Vec::default();

		let (best_number, best_hash) = best_to;

		let meta = self.blockchain.meta.read();

		// Refuse to move the head further back than the canonicalization delay
		// allows — the state needed for the reorg may already be discarded.
		if meta.best_number.saturating_sub(best_number).saturated_into::<u64>() >
			self.canonicalization_delay
		{
			return Err(sp_blockchain::Error::SetHeadTooOld);
		}

		let parent_exists =
			self.blockchain.status(route_to)? == sp_blockchain::BlockStatus::InChain;

		// Cannot find tree route with empty DB or when imported a detached block.
		if meta.best_hash != Default::default() && parent_exists {
			let tree_route = sp_blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?;

			// uncanonicalize: check safety violations and ensure the numbers no longer
			// point to these block hashes in the key mapping.
			for r in tree_route.retracted() {
				if r.hash == meta.finalized_hash {
					warn!(
						"Potential safety failure: reverting finalized block {:?}",
						(&r.number, &r.hash)
					);

					return Err(sp_blockchain::Error::NotInFinalizedChain);
				}

				retracted.push(r.hash);
				utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number)?;
			}

			// canonicalize: set the number lookup to map to this block's hash.
			for e in tree_route.enacted() {
				enacted.push(e.hash);
				utils::insert_number_to_key_mapping(
					transaction,
					columns::KEY_LOOKUP,
					e.number,
					e.hash,
				)?;
			}
		}

		// Record the new best block and its number → hash mapping.
		let lookup_key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?;
		transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key);
		utils::insert_number_to_key_mapping(
			transaction,
			columns::KEY_LOOKUP,
			best_number,
			best_hash,
		)?;

		Ok((enacted, retracted))
	}
1467
1468	fn ensure_sequential_finalization(
1469		&self,
1470		header: &Block::Header,
1471		last_finalized: Option<Block::Hash>,
1472	) -> ClientResult<()> {
1473		let last_finalized =
1474			last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash);
1475		if last_finalized != self.blockchain.meta.read().genesis_hash &&
1476			*header.parent_hash() != last_finalized
1477		{
1478			return Err(sp_blockchain::Error::NonSequentialFinalization(format!(
1479				"Last finalized {last_finalized:?} not parent of {:?}",
1480				header.hash()
1481			)));
1482		}
1483		Ok(())
1484	}
1485
	/// Finalize `hash` within `transaction`, returning the metadata update to
	/// apply once the transaction is committed.
	///
	/// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls
	/// for performance reasons.
	fn finalize_block_with_transaction(
		&self,
		transaction: &mut Transaction<DbHash>,
		hash: Block::Hash,
		header: &Block::Header,
		last_finalized: Option<Block::Hash>,
		justification: Option<Justification>,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
		remove_displaced: bool,
	) -> ClientResult<MetaUpdate<Block>> {
		// TODO: ensure best chain contains this block.
		let number = *header.number();
		// Finalization must proceed without gaps from the last finalized block.
		self.ensure_sequential_finalization(header, last_finalized)?;
		let with_state = sc_client_api::Backend::have_state_at(self, hash, number);

		self.note_finalized(
			transaction,
			header,
			hash,
			with_state,
			current_transaction_justifications,
			remove_displaced,
		)?;

		if let Some(justification) = justification {
			// Persist the justification and remember it for this transaction so
			// later steps in the same commit can see it.
			transaction.set_from_vec(
				columns::JUSTIFICATIONS,
				&utils::number_and_hash_to_lookup_key(number, hash)?,
				Justifications::from(justification.clone()).encode(),
			);
			current_transaction_justifications.insert(hash, justification);
		}
		Ok(MetaUpdate { hash, number, is_best: false, is_finalized: true, with_state })
	}
1522
	// performs forced canonicalization with a delay after importing a non-finalized block.
	//
	// Canonicalizes every block from the last canonicalized one up to
	// `best - canonicalization_delay`, stopping early (without error) as soon
	// as a block without state is encountered.
	fn force_delayed_canonicalize(
		&self,
		transaction: &mut Transaction<DbHash>,
	) -> ClientResult<()> {
		let best_canonical = match self.storage.state_db.last_canonicalized() {
			LastCanonicalized::None => 0,
			LastCanonicalized::Block(b) => b,
			// Nothing needs to be done when canonicalization is not happening.
			LastCanonicalized::NotCanonicalizing => return Ok(()),
		};

		let info = self.blockchain.info();
		let best_number: u64 = self.blockchain.info().best_number.saturated_into();

		// Walk forward through every block that is now old enough to canonicalize.
		for to_canonicalize in
			best_canonical + 1..=best_number.saturating_sub(self.canonicalization_delay)
		{
			let hash_to_canonicalize = sc_client_api::blockchain::HeaderBackend::hash(
				&self.blockchain,
				to_canonicalize.saturated_into(),
			)?
			.ok_or_else(|| {
				let best_hash = info.best_hash;

				sp_blockchain::Error::Backend(format!(
					"Can't canonicalize missing block number #{to_canonicalize} when for best block {best_hash:?} (#{best_number})",
				))
			})?;

			// A block without state cannot be canonicalized; stop here and
			// retry on a later import.
			if !sc_client_api::Backend::have_state_at(
				self,
				hash_to_canonicalize,
				to_canonicalize.saturated_into(),
			) {
				return Ok(());
			}

			trace!(target: "db", "Canonicalize block #{to_canonicalize} ({hash_to_canonicalize:?})");
			let commit = self.storage.state_db.canonicalize_block(&hash_to_canonicalize).map_err(
				sp_blockchain::Error::from_state_db::<
					sc_state_db::Error<sp_database::error::DatabaseError>,
				>,
			)?;
			apply_state_commit(transaction, commit);
		}

		Ok(())
	}
1572
1573	fn try_commit_operation(&self, mut operation: BlockImportOperation<Block>) -> ClientResult<()> {
1574		let mut transaction = Transaction::new();
1575
1576		operation.apply_aux(&mut transaction);
1577		operation.apply_offchain(&mut transaction);
1578
1579		let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len());
1580		let (best_num, mut last_finalized_hash, mut last_finalized_num, mut block_gap) = {
1581			let meta = self.blockchain.meta.read();
1582			(meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap)
1583		};
1584
1585		let mut block_gap_updated = false;
1586
1587		let mut current_transaction_justifications: HashMap<Block::Hash, Justification> =
1588			HashMap::new();
1589		let mut finalized_blocks = operation.finalized_blocks.into_iter().peekable();
1590		while let Some((block_hash, justification)) = finalized_blocks.next() {
1591			let block_header = self.blockchain.expect_header(block_hash)?;
1592			meta_updates.push(self.finalize_block_with_transaction(
1593				&mut transaction,
1594				block_hash,
1595				&block_header,
1596				Some(last_finalized_hash),
1597				justification,
1598				&mut current_transaction_justifications,
1599				finalized_blocks.peek().is_none(),
1600			)?);
1601			last_finalized_hash = block_hash;
1602			last_finalized_num = *block_header.number();
1603		}
1604
1605		let imported = if let Some(pending_block) = operation.pending_block {
1606			let hash = pending_block.header.hash();
1607
1608			let parent_hash = *pending_block.header.parent_hash();
1609			let number = *pending_block.header.number();
1610			let highest_leaf = self
1611				.blockchain
1612				.leaves
1613				.read()
1614				.highest_leaf()
1615				.map(|(n, _)| n)
1616				.unwrap_or(Zero::zero());
1617			let header_exists_in_db =
1618				number <= highest_leaf && self.blockchain.header(hash)?.is_some();
1619			// Body in DB (not incoming block) - needed to update gap when adding body to existing
1620			// header.
1621			let body_exists_in_db = self.blockchain.body(hash)?.is_some();
1622			// Incoming block has body - used for fast sync gap handling.
1623			let incoming_has_body = pending_block.body.is_some();
1624
1625			// blocks are keyed by number + hash.
1626			let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?;
1627
1628			if pending_block.leaf_state.is_best() {
1629				self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?;
1630			};
1631
1632			utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?;
1633
1634			transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode());
1635			if let Some(body) = pending_block.body {
1636				// If we have any index operations we save block in the new format with indexed
1637				// extrinsic headers Otherwise we save the body as a single blob.
1638				if operation.index_ops.is_empty() {
1639					transaction.set_from_vec(columns::BODY, &lookup_key, body.encode());
1640				} else {
1641					let body =
1642						apply_index_ops::<Block>(&mut transaction, body, operation.index_ops);
1643					transaction.set_from_vec(columns::BODY_INDEX, &lookup_key, body);
1644				}
1645			}
1646			if let Some(body) = pending_block.indexed_body {
1647				apply_indexed_body::<Block>(&mut transaction, body);
1648			}
1649			if let Some(justifications) = pending_block.justifications {
1650				transaction.set_from_vec(
1651					columns::JUSTIFICATIONS,
1652					&lookup_key,
1653					justifications.encode(),
1654				);
1655			}
1656
1657			if number.is_zero() {
1658				transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
1659
1660				if operation.commit_state {
1661					transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key);
1662				} else {
1663					// When we don't want to commit the genesis state, we still preserve it in
1664					// memory to bootstrap consensus. It is queried for an initial list of
1665					// authorities, etc.
1666					*self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new(
1667						*pending_block.header.state_root(),
1668						operation.db_updates.clone(),
1669					)));
1670				}
1671			}
1672
1673			let finalized = if operation.commit_state {
1674				let mut changeset: sc_state_db::ChangeSet<Vec<u8>> =
1675					sc_state_db::ChangeSet::default();
1676				let mut ops: u64 = 0;
1677				let mut bytes: u64 = 0;
1678				let mut removal: u64 = 0;
1679				let mut bytes_removal: u64 = 0;
1680				for (mut key, (val, rc)) in operation.db_updates.drain() {
1681					self.storage.db.sanitize_key(&mut key);
1682					if rc > 0 {
1683						ops += 1;
1684						bytes += key.len() as u64 + val.len() as u64;
1685						if rc == 1 {
1686							changeset.inserted.push((key, val.to_vec()));
1687						} else {
1688							changeset.inserted.push((key.clone(), val.to_vec()));
1689							for _ in 0..rc - 1 {
1690								changeset.inserted.push((key.clone(), Default::default()));
1691							}
1692						}
1693					} else if rc < 0 {
1694						removal += 1;
1695						bytes_removal += key.len() as u64;
1696						if rc == -1 {
1697							changeset.deleted.push(key);
1698						} else {
1699							for _ in 0..-rc {
1700								changeset.deleted.push(key.clone());
1701							}
1702						}
1703					}
1704				}
1705				self.state_usage.tally_writes_nodes(ops, bytes);
1706				self.state_usage.tally_removed_nodes(removal, bytes_removal);
1707
1708				let mut ops: u64 = 0;
1709				let mut bytes: u64 = 0;
1710				for (key, value) in operation
1711					.storage_updates
1712					.iter()
1713					.chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter()))
1714				{
1715					ops += 1;
1716					bytes += key.len() as u64;
1717					if let Some(v) = value.as_ref() {
1718						bytes += v.len() as u64;
1719					}
1720				}
1721				self.state_usage.tally_writes(ops, bytes);
1722				let number_u64 = number.saturated_into::<u64>();
1723				let commit = self
1724					.storage
1725					.state_db
1726					.insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset)
1727					.map_err(|e: sc_state_db::Error<sp_database::error::DatabaseError>| {
1728						sp_blockchain::Error::from_state_db(e)
1729					})?;
1730				apply_state_commit(&mut transaction, commit);
1731				if number <= last_finalized_num {
1732					// Canonicalize in the db when re-importing existing blocks with state.
1733					let commit = self.storage.state_db.canonicalize_block(&hash).map_err(
1734						sp_blockchain::Error::from_state_db::<
1735							sc_state_db::Error<sp_database::error::DatabaseError>,
1736						>,
1737					)?;
1738					apply_state_commit(&mut transaction, commit);
1739					meta_updates.push(MetaUpdate {
1740						hash,
1741						number,
1742						is_best: false,
1743						is_finalized: true,
1744						with_state: true,
1745					});
1746				}
1747
1748				// Check if need to finalize. Genesis is always finalized instantly.
1749				let finalized = number_u64 == 0 || pending_block.leaf_state.is_final();
1750				finalized
1751			} else {
1752				(number.is_zero() && last_finalized_num.is_zero()) ||
1753					pending_block.leaf_state.is_final()
1754			};
1755
1756			let header = &pending_block.header;
1757			let is_best = pending_block.leaf_state.is_best();
1758			trace!(
1759				target: "db",
1760				"DB Commit {hash:?} ({number}), best={is_best}, state={}, header_in_db={header_exists_in_db} body_in_db={body_exists_in_db} incoming_body={incoming_has_body}, finalized={finalized}",
1761				operation.commit_state,
1762			);
1763
1764			self.state_usage.merge_sm(operation.old_state.usage_info());
1765
1766			// release state reference so that it can be finalized
1767			// VERY IMPORTANT
1768			drop(operation.old_state);
1769
1770			if finalized {
1771				// TODO: ensure best chain contains this block.
1772				self.ensure_sequential_finalization(header, Some(last_finalized_hash))?;
1773				let mut current_transaction_justifications = HashMap::new();
1774				self.note_finalized(
1775					&mut transaction,
1776					header,
1777					hash,
1778					operation.commit_state,
1779					&mut current_transaction_justifications,
1780					true,
1781				)?;
1782			} else {
1783				// canonicalize blocks which are old enough, regardless of finality.
1784				self.force_delayed_canonicalize(&mut transaction)?
1785			}
1786
1787			if !header_exists_in_db {
1788				// Add a new leaf if the block has the potential to be finalized.
1789				if pending_block.register_as_leaf &&
1790					(number > last_finalized_num || last_finalized_num.is_zero())
1791				{
1792					let mut leaves = self.blockchain.leaves.write();
1793					leaves.import(hash, number, parent_hash);
1794					leaves.prepare_transaction(
1795						&mut transaction,
1796						columns::META,
1797						meta_keys::LEAF_PREFIX,
1798					);
1799				}
1800
1801				let mut children = children::read_children(
1802					&*self.storage.db,
1803					columns::META,
1804					meta_keys::CHILDREN_PREFIX,
1805					parent_hash,
1806				)?;
1807				if !children.contains(&hash) {
1808					children.push(hash);
1809					children::write_children(
1810						&mut transaction,
1811						columns::META,
1812						meta_keys::CHILDREN_PREFIX,
1813						parent_hash,
1814						children,
1815					);
1816				}
1817			}
1818
1819			let should_check_block_gap = !header_exists_in_db || !body_exists_in_db;
1820			debug!(
1821				target: "db",
1822				"should_check_block_gap = {should_check_block_gap}",
1823			);
1824
1825			if should_check_block_gap {
1826				let update_gap =
1827					|transaction: &mut Transaction<DbHash>,
1828					 new_gap: BlockGap<NumberFor<Block>>,
1829					 block_gap: &mut Option<BlockGap<NumberFor<Block>>>| {
1830						transaction.set(columns::META, meta_keys::BLOCK_GAP, &new_gap.encode());
1831						transaction.set(
1832							columns::META,
1833							meta_keys::BLOCK_GAP_VERSION,
1834							&BLOCK_GAP_CURRENT_VERSION.encode(),
1835						);
1836						block_gap.replace(new_gap);
1837						debug!(target: "db", "Update block gap. {block_gap:?}");
1838					};
1839
1840				let remove_gap =
1841					|transaction: &mut Transaction<DbHash>,
1842					 block_gap: &mut Option<BlockGap<NumberFor<Block>>>| {
1843						transaction.remove(columns::META, meta_keys::BLOCK_GAP);
1844						transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
1845						*block_gap = None;
1846						debug!(target: "db", "Removed block gap.");
1847					};
1848
1849				if let Some(mut gap) = block_gap {
1850					match gap.gap_type {
1851						BlockGapType::MissingHeaderAndBody => {
1852							// Handle blocks at gap start or immediately following (possibly
1853							// indicating blocks already imported during warp sync where
1854							// start was not updated).
1855							if number == gap.start {
1856								gap.start = number + One::one();
1857								utils::insert_number_to_key_mapping(
1858									&mut transaction,
1859									columns::KEY_LOOKUP,
1860									number,
1861									hash,
1862								)?;
1863								if gap.start > gap.end {
1864									remove_gap(&mut transaction, &mut block_gap);
1865								} else {
1866									update_gap(&mut transaction, gap, &mut block_gap);
1867								}
1868								block_gap_updated = true;
1869							}
1870						},
1871						BlockGapType::MissingBody => {
1872							// Gap increased when syncing the header chain during fast sync.
1873							if number == gap.end + One::one() && !incoming_has_body {
1874								gap.end += One::one();
1875								utils::insert_number_to_key_mapping(
1876									&mut transaction,
1877									columns::KEY_LOOKUP,
1878									number,
1879									hash,
1880								)?;
1881								update_gap(&mut transaction, gap, &mut block_gap);
1882								block_gap_updated = true;
1883							// Gap decreased when downloading the full blocks.
1884							} else if number == gap.start && incoming_has_body {
1885								gap.start += One::one();
1886								if gap.start > gap.end {
1887									remove_gap(&mut transaction, &mut block_gap);
1888								} else {
1889									update_gap(&mut transaction, gap, &mut block_gap);
1890								}
1891								block_gap_updated = true;
1892							}
1893						},
1894					}
1895				} else if operation.create_gap {
1896					if number > best_num + One::one() &&
1897						self.blockchain.header(parent_hash)?.is_none()
1898					{
1899						let gap = BlockGap {
1900							start: best_num + One::one(),
1901							end: number - One::one(),
1902							gap_type: BlockGapType::MissingHeaderAndBody,
1903						};
1904						update_gap(&mut transaction, gap, &mut block_gap);
1905						block_gap_updated = true;
1906						debug!(target: "db", "Detected block gap (warp sync) {block_gap:?}");
1907					} else if number == best_num + One::one() &&
1908						self.blockchain.header(parent_hash)?.is_some() &&
1909						!incoming_has_body
1910					{
1911						let gap = BlockGap {
1912							start: number,
1913							end: number,
1914							gap_type: BlockGapType::MissingBody,
1915						};
1916						update_gap(&mut transaction, gap, &mut block_gap);
1917						block_gap_updated = true;
1918						debug!(target: "db", "Detected block gap (fast sync) {block_gap:?}");
1919					}
1920				}
1921			}
1922
1923			meta_updates.push(MetaUpdate {
1924				hash,
1925				number,
1926				is_best: pending_block.leaf_state.is_best(),
1927				is_finalized: finalized,
1928				with_state: operation.commit_state,
1929			});
1930			Some((pending_block.header, hash))
1931		} else {
1932			None
1933		};
1934
1935		if let Some(set_head) = operation.set_head {
1936			if let Some(header) =
1937				sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)?
1938			{
1939				let number = header.number();
1940				let hash = header.hash();
1941
1942				self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?;
1943
1944				meta_updates.push(MetaUpdate {
1945					hash,
1946					number: *number,
1947					is_best: true,
1948					is_finalized: false,
1949					with_state: false,
1950				});
1951			} else {
1952				return Err(sp_blockchain::Error::UnknownBlock(format!(
1953					"Cannot set head {set_head:?}",
1954				)));
1955			}
1956		}
1957
1958		self.storage.db.commit(transaction)?;
1959
1960		// `reset_storage == true` means the entire state got replaced.
1961		// In this case we optimize the `STATE` column to improve read performance.
1962		if operation.reset_storage {
1963			if let Err(e) = self.storage.db.optimize_db_col(columns::STATE) {
1964				warn!(target: "db", "Failed to optimize database after state import: {e:?}");
1965			}
1966		}
1967
1968		// Apply all in-memory state changes.
1969		// Code beyond this point can't fail.
1970
1971		if let Some((header, hash)) = imported {
1972			trace!(target: "db", "DB Commit done {hash:?}");
1973			let header_metadata = CachedHeaderMetadata::from(&header);
1974			self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata);
1975			cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header));
1976		}
1977
1978		for m in meta_updates {
1979			self.blockchain.update_meta(m);
1980		}
1981		if block_gap_updated {
1982			self.blockchain.update_block_gap(block_gap);
1983		}
1984
1985		Ok(())
1986	}
1987
	/// Write stuff to a transaction after a new block is finalized. This canonicalizes finalized
	/// blocks. Fails if called with a block which was not a child of the last finalized block.
	///
	/// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls
	/// for performance reasons.
	fn note_finalized(
		&self,
		transaction: &mut Transaction<DbHash>,
		f_header: &Block::Header,
		f_hash: Block::Hash,
		with_state: bool,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
		remove_displaced: bool,
	) -> ClientResult<()> {
		let f_num = *f_header.number();

		// Record the newly finalized block (and, when it carries state, the finalized
		// state pointer) in the META column.
		let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash)?;
		if with_state {
			transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key.clone());
		}
		transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key);

		// Only canonicalize if the state-db has not already canonicalized up to this number.
		let requires_canonicalization = match self.storage.state_db.last_canonicalized() {
			LastCanonicalized::None => true,
			LastCanonicalized::Block(b) => f_num.saturated_into::<u64>() > b,
			LastCanonicalized::NotCanonicalizing => false,
		};

		if requires_canonicalization && sc_client_api::Backend::have_state_at(self, f_hash, f_num) {
			let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err(
				sp_blockchain::Error::from_state_db::<
					sc_state_db::Error<sp_database::error::DatabaseError>,
				>,
			)?;
			apply_state_commit(transaction, commit);
		}

		if remove_displaced {
			// Leaves displaced by this finalization can never become canonical: drop them
			// from the in-memory leaf set and, unless everything is kept, prune their
			// branches from the database as well.
			let new_displaced = self.blockchain.displaced_leaves_after_finalizing(
				f_hash,
				f_num,
				*f_header.parent_hash(),
			)?;

			self.blockchain.leaves.write().remove_displaced_leaves(FinalizationOutcome::new(
				new_displaced.displaced_leaves.iter().copied(),
			));

			if !matches!(self.blocks_pruning, BlocksPruning::KeepAll) {
				self.prune_displaced_branches(transaction, &new_displaced)?;
			}
		}

		self.prune_blocks(transaction, f_num, current_transaction_justifications)?;

		Ok(())
	}
2044
	/// Prune the body and justifications of the block falling out of the retention window
	/// when `finalized_number` becomes finalized.
	///
	/// Only active for `BlocksPruning::Some(n)`. A block whose justifications match a
	/// registered pruning filter is preserved entirely; for pinned blocks, the body and
	/// justifications are loaded into the in-memory pinned-blocks cache before removal.
	fn prune_blocks(
		&self,
		transaction: &mut Transaction<DbHash>,
		finalized_number: NumberFor<Block>,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
	) -> ClientResult<()> {
		if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning {
			// Always keep the last finalized block
			let keep = std::cmp::max(blocks_pruning, 1);
			if finalized_number >= keep.into() {
				// The single block leaving the `keep` window at this finalization.
				let number = finalized_number.saturating_sub(keep.into());

				// Before we prune a block, check if it is pinned
				if let Some(hash) = self.blockchain.hash(number)? {
					// Check if any pruning filter wants to preserve this block.
					// We need to check both the current transaction justifications (not yet in DB)
					// and the DB itself (for justifications from previous transactions).
					if !self.pruning_filters.is_empty() {
						let justifications = match current_transaction_justifications.get(&hash) {
							Some(j) => Some(Justifications::from(j.clone())),
							None => self.blockchain.justifications(hash)?,
						};

						let should_retain = justifications
							.map(|j| self.pruning_filters.iter().any(|f| f.should_retain(&j)))
							.unwrap_or(false);

						// We can just return here, pinning can be ignored since the block will
						// remain in the DB.
						if should_retain {
							debug!(
								target: "db",
								"Preserving block #{number} ({hash}) due to keep predicate match"
							);
							return Ok(());
						}
					}

					self.blockchain.insert_persisted_body_if_pinned(hash)?;

					// If the block was finalized in this transaction, it will not be in the db
					// yet.
					if let Some(justification) = current_transaction_justifications.remove(&hash) {
						self.blockchain.insert_justifications_if_pinned(hash, justification);
					} else {
						self.blockchain.insert_persisted_justifications_if_pinned(hash)?;
					}
				};

				self.prune_block(transaction, BlockId::<Block>::number(number))?;
			}
		}
		Ok(())
	}
2099
2100	fn prune_displaced_branches(
2101		&self,
2102		transaction: &mut Transaction<DbHash>,
2103		displaced: &DisplacedLeavesAfterFinalization<Block>,
2104	) -> ClientResult<()> {
2105		// Discard all blocks from displaced branches
2106		for &hash in displaced.displaced_blocks.iter() {
2107			self.blockchain.insert_persisted_body_if_pinned(hash)?;
2108			self.prune_block(transaction, BlockId::<Block>::hash(hash))?;
2109		}
2110		Ok(())
2111	}
2112
2113	fn prune_block(
2114		&self,
2115		transaction: &mut Transaction<DbHash>,
2116		id: BlockId<Block>,
2117	) -> ClientResult<()> {
2118		debug!(target: "db", "Removing block #{id}");
2119		utils::remove_from_db(
2120			transaction,
2121			&*self.storage.db,
2122			columns::KEY_LOOKUP,
2123			columns::BODY,
2124			id,
2125		)?;
2126		utils::remove_from_db(
2127			transaction,
2128			&*self.storage.db,
2129			columns::KEY_LOOKUP,
2130			columns::JUSTIFICATIONS,
2131			id,
2132		)?;
2133		if let Some(index) =
2134			read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)?
2135		{
2136			utils::remove_from_db(
2137				transaction,
2138				&*self.storage.db,
2139				columns::KEY_LOOKUP,
2140				columns::BODY_INDEX,
2141				id,
2142			)?;
2143			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
2144				Ok(index) => {
2145					for ex in index {
2146						if let DbExtrinsic::Indexed { hash, .. } = ex {
2147							transaction.release(columns::TRANSACTION, hash);
2148						}
2149					}
2150				},
2151				Err(err) => {
2152					return Err(sp_blockchain::Error::Backend(format!(
2153						"Error decoding body list: {err}",
2154					)))
2155				},
2156			}
2157		}
2158		Ok(())
2159	}
2160
2161	fn empty_state(&self) -> RecordStatsState<RefTrackingState<Block>, Block> {
2162		let root = EmptyStorage::<Block>::new().0; // Empty trie
2163		let db_state = DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
2164			.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache_untrusted()))
2165			.build();
2166		let state = RefTrackingState::new(db_state, self.storage.clone(), None);
2167		RecordStatsState::new(state, None, self.state_usage.clone())
2168	}
2169}
2170
2171fn apply_state_commit(
2172	transaction: &mut Transaction<DbHash>,
2173	commit: sc_state_db::CommitSet<Vec<u8>>,
2174) {
2175	for (key, val) in commit.data.inserted.into_iter() {
2176		transaction.set_from_vec(columns::STATE, &key[..], val);
2177	}
2178	for key in commit.data.deleted.into_iter() {
2179		transaction.remove(columns::STATE, &key[..]);
2180	}
2181	for (key, val) in commit.meta.inserted.into_iter() {
2182		transaction.set_from_vec(columns::STATE_META, &key[..], val);
2183	}
2184	for key in commit.meta.deleted.into_iter() {
2185		transaction.remove(columns::STATE_META, &key[..]);
2186	}
2187}
2188
/// Encode the block body for storage, applying the given transaction index operations.
///
/// For extrinsics covered by an `Insert` operation, the last `size` bytes of the encoding
/// are stored separately in the `TRANSACTION` column under `hash`, and only the remaining
/// prefix is kept inline (`DbExtrinsic::Indexed`). `Renew` operations bump the reference
/// count of already-stored data. Every other extrinsic is kept inline in full
/// (`DbExtrinsic::Full`).
///
/// Returns the SCALE encoding of the resulting `Vec<DbExtrinsic<Block>>`.
fn apply_index_ops<Block: BlockT>(
	transaction: &mut Transaction<DbHash>,
	body: Vec<Block::Extrinsic>,
	ops: Vec<IndexOperation>,
) -> Vec<u8> {
	let mut extrinsic_index: Vec<DbExtrinsic<Block>> = Vec::with_capacity(body.len());
	// Extrinsic position -> (hash, size) for newly indexed data.
	let mut index_map = HashMap::new();
	// Extrinsic position -> hash for data whose reference count is renewed.
	let mut renewed_map = HashMap::new();
	for op in ops {
		match op {
			IndexOperation::Insert { extrinsic, hash, size } => {
				index_map.insert(extrinsic, (hash, size));
			},
			IndexOperation::Renew { extrinsic, hash } => {
				renewed_map.insert(extrinsic, DbHash::from_slice(hash.as_ref()));
			},
		}
	}
	for (index, extrinsic) in body.into_iter().enumerate() {
		let db_extrinsic = if let Some(hash) = renewed_map.get(&(index as u32)) {
			// Bump ref counter
			// (`extrinsic` below shadows the extrinsic with its SCALE encoding.)
			let extrinsic = extrinsic.encode();
			transaction.reference(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()));
			DbExtrinsic::Indexed { hash: *hash, header: extrinsic }
		} else {
			match index_map.get(&(index as u32)) {
				Some((hash, size)) => {
					let encoded = extrinsic.encode();
					if *size as usize <= encoded.len() {
						// Split the encoding: the `size`-byte tail is stored separately,
						// the prefix stays inline as the "header".
						let offset = encoded.len() - *size as usize;
						transaction.store(
							columns::TRANSACTION,
							DbHash::from_slice(hash.as_ref()),
							encoded[offset..].to_vec(),
						);
						DbExtrinsic::Indexed {
							hash: DbHash::from_slice(hash.as_ref()),
							header: encoded[..offset].to_vec(),
						}
					} else {
						// Invalid indexed slice. Just store full data and don't index anything.
						DbExtrinsic::Full(extrinsic)
					}
				},
				_ => DbExtrinsic::Full(extrinsic),
			}
		};
		extrinsic_index.push(db_extrinsic);
	}
	debug!(
		target: "db",
		"DB transaction index: {} inserted, {} renewed, {} full",
		index_map.len(),
		renewed_map.len(),
		extrinsic_index.len() - index_map.len() - renewed_map.len(),
	);
	extrinsic_index.encode()
}
2247
2248fn apply_indexed_body<Block: BlockT>(transaction: &mut Transaction<DbHash>, body: Vec<Vec<u8>>) {
2249	for extrinsic in body {
2250		let hash = sp_runtime::traits::BlakeTwo256::hash(&extrinsic);
2251		transaction.store(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()), extrinsic);
2252	}
2253}
2254
2255impl<Block> sc_client_api::backend::AuxStore for Backend<Block>
2256where
2257	Block: BlockT,
2258{
2259	fn insert_aux<
2260		'a,
2261		'b: 'a,
2262		'c: 'a,
2263		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
2264		D: IntoIterator<Item = &'a &'b [u8]>,
2265	>(
2266		&self,
2267		insert: I,
2268		delete: D,
2269	) -> ClientResult<()> {
2270		let mut transaction = Transaction::new();
2271		for (k, v) in insert {
2272			transaction.set(columns::AUX, k, v);
2273		}
2274		for k in delete {
2275			transaction.remove(columns::AUX, k);
2276		}
2277		self.storage.db.commit(transaction)?;
2278		Ok(())
2279	}
2280
2281	fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
2282		Ok(self.storage.db.get(columns::AUX, key))
2283	}
2284}
2285
impl<Block: BlockT> sc_client_api::backend::Backend<Block> for Backend<Block> {
	type BlockImportOperation = BlockImportOperation<Block>;
	type Blockchain = BlockchainDb<Block>;
	type State = RecordStatsState<RefTrackingState<Block>, Block>;
	type OffchainStorage = offchain::LocalStorage;

	/// Start a new, empty block import operation based on the empty state.
	fn begin_operation(&self) -> ClientResult<Self::BlockImportOperation> {
		Ok(BlockImportOperation {
			pending_block: None,
			old_state: self.empty_state(),
			db_updates: PrefixedMemoryDB::default(),
			storage_updates: Default::default(),
			child_storage_updates: Default::default(),
			offchain_storage_updates: Default::default(),
			aux_ops: Vec::new(),
			finalized_blocks: Vec::new(),
			set_head: None,
			commit_state: false,
			create_gap: true,
			reset_storage: false,
			index_ops: Default::default(),
		})
	}

	/// Point `operation` at the state of `block` (or the empty state for the default
	/// hash) and mark it as committing state.
	fn begin_state_operation(
		&self,
		operation: &mut Self::BlockImportOperation,
		block: Block::Hash,
	) -> ClientResult<()> {
		if block == Default::default() {
			operation.old_state = self.empty_state();
		} else {
			operation.old_state = self.state_at(block, TrieCacheContext::Untrusted)?;
		}

		operation.commit_state = true;
		Ok(())
	}

	/// Commit the operation to the database.
	///
	/// On failure, the in-memory state-db is reset from the on-disk metadata and the
	/// pinning cache is cleared so the backend stays consistent with what was actually
	/// persisted.
	fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> {
		let usage = operation.old_state.usage_info();
		self.state_usage.merge_sm(usage);

		if let Err(e) = self.try_commit_operation(operation) {
			let state_meta_db = StateMetaDb(self.storage.db.clone());
			self.storage
				.state_db
				.reset(state_meta_db)
				.map_err(sp_blockchain::Error::from_state_db)?;
			self.blockchain.clear_pinning_cache();
			Err(e)
		} else {
			self.storage.state_db.sync();
			Ok(())
		}
	}

	/// Finalize the block with the given `hash` in a single database transaction,
	/// updating the in-memory metadata afterwards.
	fn finalize_block(
		&self,
		hash: Block::Hash,
		justification: Option<Justification>,
	) -> ClientResult<()> {
		let mut transaction = Transaction::new();
		let header = self.blockchain.expect_header(hash)?;

		let mut current_transaction_justifications = HashMap::new();
		let m = self.finalize_block_with_transaction(
			&mut transaction,
			hash,
			&header,
			None,
			justification,
			&mut current_transaction_justifications,
			true,
		)?;

		self.storage.db.commit(transaction)?;
		self.blockchain.update_meta(m);
		Ok(())
	}

	/// Append a justification to an already finalized block.
	///
	/// Errors with `NotInFinalizedChain` if the block is not part of the finalized
	/// chain, and with `BadJustification` if a justification for the same consensus
	/// engine ID is already stored.
	fn append_justification(
		&self,
		hash: Block::Hash,
		justification: Justification,
	) -> ClientResult<()> {
		let mut transaction: Transaction<DbHash> = Transaction::new();
		let header = self.blockchain.expect_header(hash)?;
		let number = *header.number();

		// Check if the block is finalized first.
		let is_descendent_of = is_descendent_of(&self.blockchain, None);
		let last_finalized = self.blockchain.last_finalized()?;

		// We can do a quick check first, before doing a proper but more expensive check
		if number > self.blockchain.info().finalized_number ||
			(hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?)
		{
			return Err(ClientError::NotInFinalizedChain);
		}

		// Merge with any justifications already stored for this block.
		let justifications = if let Some(mut stored_justifications) =
			self.blockchain.justifications(hash)?
		{
			if !stored_justifications.append(justification) {
				return Err(ClientError::BadJustification("Duplicate consensus engine ID".into()));
			}
			stored_justifications
		} else {
			Justifications::from(justification)
		};

		transaction.set_from_vec(
			columns::JUSTIFICATIONS,
			&utils::number_and_hash_to_lookup_key(number, hash)?,
			justifications.encode(),
		);

		self.storage.db.commit(transaction)?;

		Ok(())
	}

	fn offchain_storage(&self) -> Option<Self::OffchainStorage> {
		Some(self.offchain_storage.clone())
	}

	/// Return memory and I/O usage statistics for this backend.
	fn usage_info(&self) -> Option<UsageInfo> {
		let (io_stats, state_stats) = self.io_stats.take_or_else(|| {
			(
				// TODO: implement DB stats and cache size retrieval
				kvdb::IoStats::empty(),
				self.state_usage.take(),
			)
		});
		let database_cache = MemorySize::from_bytes(0);
		let state_cache = MemorySize::from_bytes(
			self.shared_trie_cache.as_ref().map_or(0, |c| c.used_memory_size()),
		);

		Some(UsageInfo {
			memory: MemoryInfo { state_cache, database_cache },
			io: IoInfo {
				transactions: io_stats.transactions,
				bytes_read: io_stats.bytes_read,
				bytes_written: io_stats.bytes_written,
				writes: io_stats.writes,
				reads: io_stats.reads,
				average_transaction_size: io_stats.avg_transaction_size() as u64,
				state_reads: state_stats.reads.ops,
				state_writes: state_stats.writes.ops,
				state_writes_cache: state_stats.overlay_writes.ops,
				state_reads_cache: state_stats.cache_reads.ops,
				state_writes_nodes: state_stats.nodes_writes.ops,
			},
		})
	}

	/// Revert up to `n` blocks, returning the number of blocks actually reverted and
	/// the set of reverted finalized block hashes.
	///
	/// Unless `revert_finalized` is `true`, reverting stops at the last finalized
	/// block.
	fn revert(
		&self,
		n: NumberFor<Block>,
		revert_finalized: bool,
	) -> ClientResult<(NumberFor<Block>, HashSet<Block::Hash>)> {
		let mut reverted_finalized = HashSet::new();

		let info = self.blockchain.info();

		// Reverting starts from the highest leaf, which can be ahead of the best block.
		let highest_leaf = self
			.blockchain
			.leaves
			.read()
			.highest_leaf()
			.and_then(|(n, h)| h.last().map(|h| (n, *h)));

		let best_number = info.best_number;
		let best_hash = info.best_hash;

		let finalized = info.finalized_number;

		// Cap `n` at the number of non-finalized blocks unless finalized blocks may be
		// reverted as well.
		let revertible = best_number - finalized;
		let n = if !revert_finalized && revertible < n { revertible } else { n };

		let (n, mut number_to_revert, mut hash_to_revert) = match highest_leaf {
			Some((l_n, l_h)) => (n + (l_n - best_number), l_n, l_h),
			None => (n, best_number, best_hash),
		};

		let mut revert_blocks = || -> ClientResult<NumberFor<Block>> {
			for c in 0..n.saturated_into::<u64>() {
				if number_to_revert.is_zero() {
					return Ok(c.saturated_into::<NumberFor<Block>>());
				}
				let mut transaction = Transaction::new();
				let removed = self.blockchain.header(hash_to_revert)?.ok_or_else(|| {
					sp_blockchain::Error::UnknownBlock(format!(
						"Error reverting to {hash_to_revert}. Block header not found.",
					))
				})?;
				let removed_hash = hash_to_revert;

				let prev_number = number_to_revert.saturating_sub(One::one());
				let prev_hash =
					if prev_number == best_number { best_hash } else { *removed.parent_hash() };

				// Stop as soon as the state of the would-be new head is unavailable.
				if !self.have_state_at(prev_hash, prev_number) {
					return Ok(c.saturated_into::<NumberFor<Block>>());
				}

				match self.storage.state_db.revert_one() {
					Some(commit) => {
						apply_state_commit(&mut transaction, commit);

						number_to_revert = prev_number;
						hash_to_revert = prev_hash;

						let update_finalized = number_to_revert < finalized;

						let key = utils::number_and_hash_to_lookup_key(
							number_to_revert,
							&hash_to_revert,
						)?;
						if update_finalized {
							transaction.set_from_vec(
								columns::META,
								meta_keys::FINALIZED_BLOCK,
								key.clone(),
							);

							reverted_finalized.insert(removed_hash);
							// If the finalized-state pointer referenced the reverted block,
							// move it back one block, or drop it if no earlier state exists.
							if let Some((hash, _)) = self.blockchain.info().finalized_state {
								if hash == hash_to_revert {
									if !number_to_revert.is_zero() &&
										self.have_state_at(prev_hash, prev_number)
									{
										let lookup_key = utils::number_and_hash_to_lookup_key(
											prev_number,
											prev_hash,
										)?;
										transaction.set_from_vec(
											columns::META,
											meta_keys::FINALIZED_STATE,
											lookup_key,
										);
									} else {
										transaction
											.remove(columns::META, meta_keys::FINALIZED_STATE);
									}
								}
							}
						}

						transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key);
						transaction.remove(columns::KEY_LOOKUP, removed_hash.as_ref());
						children::remove_children(
							&mut transaction,
							columns::META,
							meta_keys::CHILDREN_PREFIX,
							hash_to_revert,
						);
						self.prune_block(&mut transaction, BlockId::Hash(removed_hash))?;
						remove_from_db::<Block>(
							&mut transaction,
							&*self.storage.db,
							columns::KEY_LOOKUP,
							columns::HEADER,
							BlockId::Hash(removed_hash),
						)?;

						self.storage.db.commit(transaction)?;

						// Clean the cache
						self.blockchain.remove_header_metadata(removed_hash);

						let is_best = number_to_revert < best_number;

						self.blockchain.update_meta(MetaUpdate {
							hash: hash_to_revert,
							number: number_to_revert,
							is_best,
							is_finalized: update_finalized,
							with_state: false,
						});
					},
					None => return Ok(c.saturated_into::<NumberFor<Block>>()),
				}
			}

			Ok(n)
		};

		let reverted = revert_blocks()?;

		// Remove any remaining leaves above the new head and persist the updated
		// leaf set.
		let revert_leaves = || -> ClientResult<()> {
			let mut transaction = Transaction::new();
			let mut leaves = self.blockchain.leaves.write();

			leaves.revert(hash_to_revert, number_to_revert).into_iter().try_for_each(
				|(h, _)| {
					self.blockchain.remove_header_metadata(h);
					transaction.remove(columns::KEY_LOOKUP, h.as_ref());

					self.prune_block(&mut transaction, BlockId::Hash(h))?;
					remove_from_db::<Block>(
						&mut transaction,
						&*self.storage.db,
						columns::KEY_LOOKUP,
						columns::HEADER,
						BlockId::Hash(h),
					)?;

					Ok::<_, ClientError>(())
				},
			)?;
			leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
			self.storage.db.commit(transaction)?;

			Ok(())
		};

		revert_leaves()?;

		Ok((reverted, reverted_finalized))
	}

	/// Remove a leaf block that is not the current best block and whose state is still
	/// available, updating the parent's children list and the leaf set accordingly.
	fn remove_leaf_block(&self, hash: Block::Hash) -> ClientResult<()> {
		let best_hash = self.blockchain.info().best_hash;

		if best_hash == hash {
			return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {hash:?}")));
		}

		let hdr = self.blockchain.header_metadata(hash)?;
		if !self.have_state_at(hash, hdr.number) {
			return Err(sp_blockchain::Error::UnknownBlock(format!(
				"State already discarded for {hash:?}",
			)));
		}

		let mut leaves = self.blockchain.leaves.write();
		if !leaves.contains(hdr.number, hash) {
			return Err(sp_blockchain::Error::Backend(format!(
				"Can't remove non-leaf block {hash:?}",
			)));
		}

		let mut transaction = Transaction::new();
		if let Some(commit) = self.storage.state_db.remove(&hash) {
			apply_state_commit(&mut transaction, commit);
		}
		transaction.remove(columns::KEY_LOOKUP, hash.as_ref());

		// Siblings of the removed block under the same parent.
		let children: Vec<_> = self
			.blockchain()
			.children(hdr.parent)?
			.into_iter()
			.filter(|child_hash| *child_hash != hash)
			.collect();
		// If this was the parent's only child, the parent becomes a leaf again.
		let parent_leaf = if children.is_empty() {
			children::remove_children(
				&mut transaction,
				columns::META,
				meta_keys::CHILDREN_PREFIX,
				hdr.parent,
			);
			Some(hdr.parent)
		} else {
			children::write_children(
				&mut transaction,
				columns::META,
				meta_keys::CHILDREN_PREFIX,
				hdr.parent,
				children,
			);
			None
		};

		let remove_outcome = leaves.remove(hash, hdr.number, parent_leaf);
		leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
		if let Err(e) = self.storage.db.commit(transaction) {
			// Roll back the in-memory leaf-set change if the database commit failed.
			if let Some(outcome) = remove_outcome {
				leaves.undo().undo_remove(outcome);
			}
			return Err(e.into());
		}
		self.blockchain().remove_header_metadata(hash);
		Ok(())
	}

	fn blockchain(&self) -> &BlockchainDb<Block> {
		&self.blockchain
	}

	/// Return the state at the given block `hash`, pinning it in the state-db while in
	/// use. Fails with `UnknownBlock` if the state was already discarded.
	fn state_at(
		&self,
		hash: Block::Hash,
		trie_cache_context: TrieCacheContext,
	) -> ClientResult<Self::State> {
		// Special-case genesis: serve it from the cached genesis state when available.
		if hash == self.blockchain.meta.read().genesis_hash {
			if let Some(genesis_state) = &*self.genesis_state.read() {
				let root = genesis_state.root;
				let db_state =
					DbStateBuilder::<HashingFor<Block>>::new(genesis_state.clone(), root)
						.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| {
							if matches!(trie_cache_context, TrieCacheContext::Trusted) {
								c.local_cache_trusted()
							} else {
								c.local_cache_untrusted()
							}
						}))
						.build();

				let state = RefTrackingState::new(db_state, self.storage.clone(), None);
				return Ok(RecordStatsState::new(state, None, self.state_usage.clone()));
			}
		}

		match self.blockchain.header_metadata(hash) {
			Ok(ref hdr) => {
				// Hint for the state-db pin: is the state root present in the database?
				let hint = || {
					sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref())
						.unwrap_or(None)
						.is_some()
				};

				if let Ok(()) =
					self.storage.state_db.pin(&hash, hdr.number.saturated_into::<u64>(), hint)
				{
					let root = hdr.state_root;
					let db_state =
						DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
							.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| {
								if matches!(trie_cache_context, TrieCacheContext::Trusted) {
									c.local_cache_trusted()
								} else {
									c.local_cache_untrusted()
								}
							}))
							.build();
					let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash));
					Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone()))
				} else {
					Err(sp_blockchain::Error::UnknownBlock(format!(
						"State already discarded for {hash:?}",
					)))
				}
			},
			Err(e) => Err(e),
		}
	}

	/// Check whether the state for the given block is available.
	fn have_state_at(&self, hash: Block::Hash, number: NumberFor<Block>) -> bool {
		if self.is_archive {
			// Archive mode: presence of the state root in storage is authoritative.
			match self.blockchain.header_metadata(hash) {
				Ok(header) => sp_state_machine::Storage::get(
					self.storage.as_ref(),
					&header.state_root,
					(&[], None),
				)
				.unwrap_or(None)
				.is_some(),
				_ => false,
			}
		} else {
			match self.storage.state_db.is_pruned(&hash, number.saturated_into::<u64>()) {
				IsPruned::Pruned => false,
				IsPruned::NotPruned => true,
				// Inconclusive: fall back to checking for the state root directly.
				IsPruned::MaybePruned => match self.blockchain.header_metadata(hash) {
					Ok(header) => sp_state_machine::Storage::get(
						self.storage.as_ref(),
						&header.state_root,
						(&[], None),
					)
					.unwrap_or(None)
					.is_some(),
					_ => false,
				},
			}
		}
	}

	fn get_import_lock(&self) -> &RwLock<()> {
		&self.import_lock
	}

	/// Archive pruning modes require syncing all blocks rather than a fast/warp sync.
	fn requires_full_sync(&self) -> bool {
		matches!(
			self.storage.state_db.pruning_mode(),
			PruningMode::ArchiveAll | PruningMode::ArchiveCanonical
		)
	}

	/// Pin the block so its state (and, unless all blocks are kept, its body and
	/// justifications) survive pruning while the pin is held.
	fn pin_block(&self, hash: <Block as BlockT>::Hash) -> sp_blockchain::Result<()> {
		let hint = || {
			let header_metadata = self.blockchain.header_metadata(hash);
			header_metadata
				.map(|hdr| {
					sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref())
						.unwrap_or(None)
						.is_some()
				})
				.unwrap_or(false)
		};

		if let Some(number) = self.blockchain.number(hash)? {
			self.storage.state_db.pin(&hash, number.saturated_into::<u64>(), hint).map_err(
				|_| {
					sp_blockchain::Error::UnknownBlock(format!(
						"Unable to pin: state already discarded for `{hash:?}`",
					))
				},
			)?;
		} else {
			return Err(ClientError::UnknownBlock(format!(
				"Can not pin block with hash `{hash:?}`. Block not found.",
			)));
		}

		if self.blocks_pruning != BlocksPruning::KeepAll {
			// Only increase reference count for this hash. Value is loaded once we prune.
			self.blockchain.bump_ref(hash);
		}
		Ok(())
	}

	/// Release a pin previously taken with `pin_block`.
	fn unpin_block(&self, hash: <Block as BlockT>::Hash) {
		self.storage.state_db.unpin(&hash);

		if self.blocks_pruning != BlocksPruning::KeepAll {
			self.blockchain.unpin(hash);
		}
	}
}
2818
// Marker-trait implementation: no additional methods are required.
impl<Block: BlockT> sc_client_api::backend::LocalBackend<Block> for Backend<Block> {}
2820
2821#[cfg(test)]
2822pub(crate) mod tests {
2823	use super::*;
2824	use crate::{columns, utils::number_and_hash_to_lookup_key};
2825	use hash_db::{HashDB, EMPTY_PREFIX};
2826	use sc_client_api::{
2827		backend::{Backend as BTrait, BlockImportOperation as Op},
2828		blockchain::Backend as BLBTrait,
2829	};
2830	use sp_blockchain::{lowest_common_ancestor, tree_route};
2831	use sp_core::H256;
2832	use sp_runtime::{
2833		testing::{Block as RawBlock, Header, MockCallU64, TestXt},
2834		traits::{BlakeTwo256, Hash},
2835		ConsensusEngineId, StateVersion,
2836	};
2837
2838	const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0";
2839	const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1";
2840
2841	type UncheckedXt = TestXt<MockCallU64, ()>;
2842	pub(crate) type Block = RawBlock<UncheckedXt>;
2843
2844	pub fn insert_header(
2845		backend: &Backend<Block>,
2846		number: u64,
2847		parent_hash: H256,
2848		changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
2849		extrinsics_root: H256,
2850	) -> H256 {
2851		insert_block(backend, number, parent_hash, changes, extrinsics_root, Vec::new(), None)
2852			.unwrap()
2853	}
2854
2855	pub fn insert_block(
2856		backend: &Backend<Block>,
2857		number: u64,
2858		parent_hash: H256,
2859		_changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
2860		extrinsics_root: H256,
2861		body: Vec<UncheckedXt>,
2862		transaction_index: Option<Vec<IndexOperation>>,
2863	) -> Result<H256, sp_blockchain::Error> {
2864		use sp_runtime::testing::Digest;
2865
2866		let digest = Digest::default();
2867		let mut header =
2868			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2869
2870		let block_hash = if number == 0 { Default::default() } else { parent_hash };
2871		let mut op = backend.begin_operation().unwrap();
2872		backend.begin_state_operation(&mut op, block_hash).unwrap();
2873		if let Some(index) = transaction_index {
2874			op.update_transaction_index(index).unwrap();
2875		}
2876
2877		// Insert some fake data to ensure that the block can be found in the state column.
2878		let (root, overlay) = op.old_state.storage_root(
2879			vec![(block_hash.as_ref(), Some(block_hash.as_ref()))].into_iter(),
2880			StateVersion::V1,
2881		);
2882		op.update_db_storage(overlay).unwrap();
2883		header.state_root = root.into();
2884
2885		op.set_block_data(header.clone(), Some(body), None, None, NewBlockState::Best, true)
2886			.unwrap();
2887
2888		backend.commit_operation(op)?;
2889
2890		Ok(header.hash())
2891	}
2892
2893	pub fn insert_disconnected_header(
2894		backend: &Backend<Block>,
2895		number: u64,
2896		parent_hash: H256,
2897		extrinsics_root: H256,
2898		best: bool,
2899	) -> H256 {
2900		use sp_runtime::testing::Digest;
2901
2902		let digest = Digest::default();
2903		let header =
2904			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2905
2906		let mut op = backend.begin_operation().unwrap();
2907
2908		op.set_block_data(
2909			header.clone(),
2910			Some(vec![]),
2911			None,
2912			None,
2913			if best { NewBlockState::Best } else { NewBlockState::Normal },
2914			true,
2915		)
2916		.unwrap();
2917
2918		backend.commit_operation(op).unwrap();
2919
2920		header.hash()
2921	}
2922
2923	pub fn insert_header_no_head(
2924		backend: &Backend<Block>,
2925		number: u64,
2926		parent_hash: H256,
2927		extrinsics_root: H256,
2928	) -> H256 {
2929		use sp_runtime::testing::Digest;
2930
2931		let digest = Digest::default();
2932		let mut header =
2933			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2934		let mut op = backend.begin_operation().unwrap();
2935
2936		let root = backend
2937			.state_at(parent_hash, TrieCacheContext::Untrusted)
2938			.unwrap_or_else(|_| {
2939				if parent_hash == Default::default() {
2940					backend.empty_state()
2941				} else {
2942					panic!("Unknown block: {parent_hash:?}")
2943				}
2944			})
2945			.storage_root(
2946				vec![(parent_hash.as_ref(), Some(parent_hash.as_ref()))].into_iter(),
2947				StateVersion::V1,
2948			)
2949			.0;
2950		header.state_root = root.into();
2951
2952		op.set_block_data(header.clone(), None, None, None, NewBlockState::Normal, true)
2953			.unwrap();
2954		backend.commit_operation(op).unwrap();
2955
2956		header.hash()
2957	}
2958
	// Build a 10-block chain in one backend instance, then reopen the same
	// underlying database with a fresh `Backend` and verify that all block
	// number -> hash mappings survived the round trip.
	#[test]
	fn block_hash_inserted_correctly() {
		let backing = {
			let db = Backend::<Block>::new_test(1, 0);
			for i in 0..10 {
				// The mapping must not exist before the block is imported.
				assert!(db.blockchain().hash(i).unwrap().is_none());

				{
					// Parent of block 0 is the default hash; otherwise look up
					// the previously imported block.
					let hash = if i == 0 {
						Default::default()
					} else {
						db.blockchain.hash(i - 1).unwrap().unwrap()
					};

					let mut op = db.begin_operation().unwrap();
					db.begin_state_operation(&mut op, hash).unwrap();
					let header = Header {
						number: i,
						parent_hash: hash,
						state_root: Default::default(),
						digest: Default::default(),
						extrinsics_root: Default::default(),
					};

					op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
						.unwrap();
					db.commit_operation(op).unwrap();
				}

				// After the commit the number -> hash mapping must be present.
				assert!(db.blockchain().hash(i).unwrap().is_some())
			}
			// Keep only the raw database handle; the first `Backend` is dropped here.
			db.storage.db.clone()
		};

		// Reopen the same database through `DatabaseSource::Custom`.
		let backend = Backend::<Block>::new(
			DatabaseSettings {
				trie_cache_maximum_size: Some(16 * 1024 * 1024),
				state_pruning: Some(PruningMode::blocks_pruning(1)),
				source: DatabaseSource::Custom { db: backing, require_create_flag: false },
				blocks_pruning: BlocksPruning::KeepFinalized,
				pruning_filters: Default::default(),
				metrics_registry: None,
			},
			0,
		)
		.unwrap();
		// The best block and every number -> hash mapping must be readable again.
		assert_eq!(backend.blockchain().info().best_number, 9);
		for i in 0..10 {
			assert!(backend.blockchain().hash(i).unwrap().is_some())
		}
	}
3010
3011	#[test]
3012	fn set_state_data() {
3013		set_state_data_inner(StateVersion::V0);
3014		set_state_data_inner(StateVersion::V1);
3015	}
	// Import a genesis block with initial storage, then a child block that
	// deletes one key and adds another, verifying the state visible at each
	// block's hash.
	fn set_state_data_inner(state_version: StateVersion) {
		let db = Backend::<Block>::new_test(2, 0);
		let hash = {
			let mut op = db.begin_operation().unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(vec![1, 3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9])];

			// Compute the genesis state root before handing `storage` to
			// `reset_storage` (which consumes it).
			header.state_root = op
				.old_state
				.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.reset_storage(
				Storage {
					top: storage.into_iter().collect(),
					children_default: Default::default(),
				},
				state_version,
			)
			.unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			db.commit_operation(op).unwrap();

			// The committed genesis state must expose exactly the inserted keys.
			let state = db.state_at(hash, TrieCacheContext::Untrusted).unwrap();

			assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6]));
			assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
			assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None);

			hash
		};

		{
			let mut op = db.begin_operation().unwrap();
			db.begin_state_operation(&mut op, hash).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			// `None` deletes key [1,3,5]; [5,5,5] is newly inserted.
			let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];

			let (root, overlay) = op.old_state.storage_root(
				storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
				state_version,
			);
			op.update_db_storage(overlay).unwrap();
			header.state_root = root.into();

			op.update_storage(storage, Vec::new()).unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			db.commit_operation(op).unwrap();

			// At block 1: the deleted key is gone, the untouched and new keys remain.
			let state = db.state_at(header.hash(), TrieCacheContext::Untrusted).unwrap();

			assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None);
			assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
			assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6]));
		}
	}
3092
	// Verify state-db reference counting: a trie node is only physically removed
	// once its reference count goes negative (more removes than inserts) AND the
	// blocks referencing it are pruned after finalization.
	#[test]
	fn delete_only_when_negative_rc() {
		sp_tracing::try_init_simple();
		let state_version = StateVersion::default();
		let key;
		let backend = Backend::<Block>::new_test(1, 0);

		// Block 0: insert the node once (rc = 1); it must be readable afterwards.
		let hash = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, Default::default()).unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			header.state_root =
				op.old_state.storage_root(std::iter::empty(), state_version).0.into();
			let hash = header.hash();

			op.reset_storage(
				Storage { top: Default::default(), children_default: Default::default() },
				state_version,
			)
			.unwrap();

			key = op.db_updates.insert(EMPTY_PREFIX, b"hello");
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			assert_eq!(
				backend
					.storage
					.db
					.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
					.unwrap(),
				&b"hello"[..]
			);
			hash
		};

		// Block 1: insert the same node again AND remove it once — the insert and
		// remove cancel out (rc stays at 1), so the value must still be present.
		let hashof1 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hash).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.db_updates.insert(EMPTY_PREFIX, b"hello");
			op.db_updates.remove(&key, EMPTY_PREFIX);
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			assert_eq!(
				backend
					.storage
					.db
					.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
					.unwrap(),
				&b"hello"[..]
			);
			hash
		};

		// Block 2: remove the node once more (rc goes to 0). The value is still
		// readable because the removal only takes effect when blocks are pruned.
		let hashof2 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof1).unwrap();
			let mut header = Header {
				number: 2,
				parent_hash: hashof1,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.db_updates.remove(&key, EMPTY_PREFIX);
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			assert!(backend
				.storage
				.db
				.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
				.is_some());
			hash
		};

		// Block 3: no state changes; just extends the chain.
		let hashof3 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof2).unwrap();
			let mut header = Header {
				number: 3,
				parent_hash: hashof2,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			hash
		};

		// Block 4: with a pruning window of 1 (`new_test(1, 0)`), committing this
		// block prunes far enough that the pending removal is applied and the
		// value disappears from the state column.
		let hashof4 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof3).unwrap();
			let mut header = Header {
				number: 4,
				parent_hash: hashof3,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			assert!(backend
				.storage
				.db
				.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
				.is_none());
			hash
		};

		// Finalizing the whole chain must not resurrect the removed value.
		backend.finalize_block(hashof1, None).unwrap();
		backend.finalize_block(hashof2, None).unwrap();
		backend.finalize_block(hashof3, None).unwrap();
		backend.finalize_block(hashof4, None).unwrap();
		assert!(backend
			.storage
			.db
			.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
			.is_none());
	}
3277
3278	#[test]
3279	fn tree_route_works() {
3280		let backend = Backend::<Block>::new_test(1000, 100);
3281		let blockchain = backend.blockchain();
3282		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3283
3284		// fork from genesis: 3 prong.
3285		let a1 = insert_header(&backend, 1, block0, None, Default::default());
3286		let a2 = insert_header(&backend, 2, a1, None, Default::default());
3287		let a3 = insert_header(&backend, 3, a2, None, Default::default());
3288
3289		// fork from genesis: 2 prong.
3290		let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32]));
3291		let b2 = insert_header(&backend, 2, b1, None, Default::default());
3292
3293		{
3294			let tree_route = tree_route(blockchain, a1, a1).unwrap();
3295
3296			assert_eq!(tree_route.common_block().hash, a1);
3297			assert!(tree_route.retracted().is_empty());
3298			assert!(tree_route.enacted().is_empty());
3299		}
3300
3301		{
3302			let tree_route = tree_route(blockchain, a3, b2).unwrap();
3303
3304			assert_eq!(tree_route.common_block().hash, block0);
3305			assert_eq!(
3306				tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3307				vec![a3, a2, a1]
3308			);
3309			assert_eq!(
3310				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3311				vec![b1, b2]
3312			);
3313		}
3314
3315		{
3316			let tree_route = tree_route(blockchain, a1, a3).unwrap();
3317
3318			assert_eq!(tree_route.common_block().hash, a1);
3319			assert!(tree_route.retracted().is_empty());
3320			assert_eq!(
3321				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3322				vec![a2, a3]
3323			);
3324		}
3325
3326		{
3327			let tree_route = tree_route(blockchain, a3, a1).unwrap();
3328
3329			assert_eq!(tree_route.common_block().hash, a1);
3330			assert_eq!(
3331				tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3332				vec![a3, a2]
3333			);
3334			assert!(tree_route.enacted().is_empty());
3335		}
3336
3337		{
3338			let tree_route = tree_route(blockchain, a2, a2).unwrap();
3339
3340			assert_eq!(tree_route.common_block().hash, a2);
3341			assert!(tree_route.retracted().is_empty());
3342			assert!(tree_route.enacted().is_empty());
3343		}
3344	}
3345
3346	#[test]
3347	fn tree_route_child() {
3348		let backend = Backend::<Block>::new_test(1000, 100);
3349		let blockchain = backend.blockchain();
3350
3351		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3352		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3353
3354		{
3355			let tree_route = tree_route(blockchain, block0, block1).unwrap();
3356
3357			assert_eq!(tree_route.common_block().hash, block0);
3358			assert!(tree_route.retracted().is_empty());
3359			assert_eq!(
3360				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3361				vec![block1]
3362			);
3363		}
3364	}
3365
3366	#[test]
3367	fn lowest_common_ancestor_works() {
3368		let backend = Backend::<Block>::new_test(1000, 100);
3369		let blockchain = backend.blockchain();
3370		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3371
3372		// fork from genesis: 3 prong.
3373		let a1 = insert_header(&backend, 1, block0, None, Default::default());
3374		let a2 = insert_header(&backend, 2, a1, None, Default::default());
3375		let a3 = insert_header(&backend, 3, a2, None, Default::default());
3376
3377		// fork from genesis: 2 prong.
3378		let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32]));
3379		let b2 = insert_header(&backend, 2, b1, None, Default::default());
3380
3381		{
3382			let lca = lowest_common_ancestor(blockchain, a3, b2).unwrap();
3383
3384			assert_eq!(lca.hash, block0);
3385			assert_eq!(lca.number, 0);
3386		}
3387
3388		{
3389			let lca = lowest_common_ancestor(blockchain, a1, a3).unwrap();
3390
3391			assert_eq!(lca.hash, a1);
3392			assert_eq!(lca.number, 1);
3393		}
3394
3395		{
3396			let lca = lowest_common_ancestor(blockchain, a3, a1).unwrap();
3397
3398			assert_eq!(lca.hash, a1);
3399			assert_eq!(lca.number, 1);
3400		}
3401
3402		{
3403			let lca = lowest_common_ancestor(blockchain, a2, a3).unwrap();
3404
3405			assert_eq!(lca.hash, a2);
3406			assert_eq!(lca.number, 2);
3407		}
3408
3409		{
3410			let lca = lowest_common_ancestor(blockchain, a2, a1).unwrap();
3411
3412			assert_eq!(lca.hash, a1);
3413			assert_eq!(lca.number, 1);
3414		}
3415
3416		{
3417			let lca = lowest_common_ancestor(blockchain, a2, a2).unwrap();
3418
3419			assert_eq!(lca.hash, a2);
3420			assert_eq!(lca.number, 2);
3421		}
3422	}
3423
	#[test]
	fn displaced_leaves_after_finalizing_works_with_disconnect() {
		// In this test we will create a situation that can typically happen after warp sync.
		// The situation looks like this:
		// g -> <unimported> -> a3 -> a4
		// Basically there is a gap of unimported blocks at some point in the chain.
		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();
		let genesis_number = 0;
		let genesis_hash =
			insert_header(&backend, genesis_number, Default::default(), None, Default::default());

		// a3's parent (H256([200; 32])) is never imported — this creates the gap.
		let a3_number = 3;
		let a3_hash = insert_disconnected_header(
			&backend,
			a3_number,
			H256::from([200; 32]),
			H256::from([1; 32]),
			true,
		);

		let a4_number = 4;
		let a4_hash =
			insert_disconnected_header(&backend, a4_number, a3_hash, H256::from([2; 32]), true);
		// Finalizing above the gap displaces the genesis leaf, but no blocks can
		// be collected because the route back to genesis cannot be traversed.
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([200; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a4_hash, a4_number, a3_hash)
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Import block a1 which has the genesis block as parent.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		let a1_number = 1;
		let a1_hash = insert_disconnected_header(
			&backend,
			a1_number,
			genesis_hash,
			H256::from([123; 32]),
			false,
		);
		// a1 is now the leaf of the pre-gap chain segment; with a leaf below the
		// finalized block on the same (interrupted) chain, nothing is displaced.
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Import block b1 which has the genesis block as parent.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		//  \-> b1
		let b1_number = 1;
		let b1_hash = insert_disconnected_header(
			&backend,
			b1_number,
			genesis_hash,
			H256::from([124; 32]),
			false,
		);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash, b1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// If the branch of b blocks is higher in number than the a branch, we
		// should still not prune disconnected leaves.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		//  \-> b1 -> b2 ----------> b3 ----> b4 -> b5
		let b2_number = 2;
		let b2_hash =
			insert_disconnected_header(&backend, b2_number, b1_hash, H256::from([40; 32]), false);
		let b3_number = 3;
		let b3_hash =
			insert_disconnected_header(&backend, b3_number, b2_hash, H256::from([41; 32]), false);
		let b4_number = 4;
		let b4_hash =
			insert_disconnected_header(&backend, b4_number, b3_hash, H256::from([42; 32]), false);
		let b5_number = 5;
		let b5_hash =
			insert_disconnected_header(&backend, b5_number, b4_hash, H256::from([43; 32]), false);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Even though there is a disconnect, the displaced-leaves computation
		// should still detect branches above the block gap.
		//                              /-> c4
		// g -> a1 -> <unimported> -> a3 -> a4(f)
		//  \-> b1 -> b2 ----------> b3 -> b4 -> b5
		let c4_number = 4;
		let c4_hash =
			insert_disconnected_header(&backend, c4_number, a3_hash, H256::from([44; 32]), false);
		// c4 forks off a3 above the gap, so finalizing a4 can both displace and
		// collect it.
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a4_hash, a4_number, a3_hash)
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, c4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(c4_number, c4_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![c4_hash]);
		}
	}
3547
	#[test]
	fn disconnected_blocks_do_not_become_leaves_and_warp_sync_scenario() {
		// Simulate a realistic case:
		//
		// 1. Import genesis (block #0) normally — becomes a leaf.
		// 2. Import warp sync proof blocks at #5, #10, #15 without leaf registration. Their parents
		//    are NOT in the DB. They must NOT appear as leaves.
		// 3. Import block #20 as Final. Its parent (#19) is not in the DB. Being Final, it updates
		//    finalized number to 20.
		// 4. Import blocks #1..#19 with Normal state (gap sync). Since last_finalized_num is now 20
		//    and each block number < 20, the leaf condition (number > last_finalized_num ||
		//    last_finalized_num.is_zero()) is FALSE — they must NOT become leaves.
		// 5. Assert throughout and verify displaced_leaves_after_finalizing works cleanly with no
		//    disconnected proof blocks in the displaced list.

		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();

		// Low-level import helper: unlike `insert_disconnected_header` this also
		// lets the caller choose the `NewBlockState` and whether the block is
		// registered as a leaf (`create_gap` flag of `set_block_data`).
		let insert_block_raw = |number: u64,
		                        parent_hash: H256,
		                        ext_root: H256,
		                        state: NewBlockState,
		                        register_as_leaf: bool|
		 -> H256 {
			use sp_runtime::testing::Digest;
			let digest = Digest::default();
			let header = Header {
				number,
				parent_hash,
				state_root: Default::default(),
				digest,
				extrinsics_root: ext_root,
			};
			let mut op = backend.begin_operation().unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, state, register_as_leaf)
				.unwrap();
			backend.commit_operation(op).unwrap();
			header.hash()
		};

		// --- Step 1: import genesis ---
		let genesis_hash = insert_header(&backend, 0, Default::default(), None, Default::default());
		assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]);

		// --- Step 2: import warp sync proof blocks without leaf registration ---
		// These simulate authority-set-change blocks from the warp sync proof.
		// Their parents are NOT in the DB.
		let _proof5_hash = insert_block_raw(
			5,
			H256::from([5; 32]),
			H256::from([50; 32]),
			NewBlockState::Normal,
			false,
		);
		let _proof10_hash = insert_block_raw(
			10,
			H256::from([10; 32]),
			H256::from([100; 32]),
			NewBlockState::Normal,
			false,
		);
		let _proof15_hash = insert_block_raw(
			15,
			H256::from([15; 32]),
			H256::from([150; 32]),
			NewBlockState::Normal,
			false,
		);

		// Leaves must still only contain genesis.
		assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]);

		// The disconnected blocks should still be retrievable from the DB.
		assert!(blockchain.header(_proof5_hash).unwrap().is_some());
		assert!(blockchain.header(_proof10_hash).unwrap().is_some());
		assert!(blockchain.header(_proof15_hash).unwrap().is_some());

		// --- Step 3: import warp sync target block #20 as Final ---
		// Parent (#19) is not in the DB. Use the same low-level approach but with
		// NewBlockState::Final. Being Final, it will be set as best + finalized.
		let block20_hash = insert_block_raw(
			20,
			H256::from([19; 32]),
			H256::from([200; 32]),
			NewBlockState::Final,
			true,
		);

		// Block #20 should now be a leaf (it's best and finalized).
		let leaves = blockchain.leaves().unwrap();
		assert!(leaves.contains(&block20_hash));
		// Verify finalized number was updated to 20.
		assert_eq!(blockchain.info().finalized_number, 20);
		assert_eq!(blockchain.info().finalized_hash, block20_hash);
		// Disconnected proof blocks must still not be leaves.
		assert!(!leaves.contains(&_proof5_hash));
		assert!(!leaves.contains(&_proof10_hash));
		assert!(!leaves.contains(&_proof15_hash));

		// --- Step 4: import gap sync blocks #1..#19 with Normal state ---
		// Since last_finalized_num is 20, each block with number < 20 should NOT
		// become a leaf (the condition `number > last_finalized_num` is false).
		// Build the chain: genesis -> #1 -> #2 -> ... -> #19.
		let mut prev_hash = genesis_hash;
		let mut gap_hashes = Vec::new();
		for n in 1..=19 {
			let h = insert_disconnected_header(&backend, n, prev_hash, Default::default(), false);
			gap_hashes.push(h);
			prev_hash = h;
		}

		// Verify gap sync blocks did NOT create new leaves.
		let leaves = blockchain.leaves().unwrap();
		for (i, gap_hash) in gap_hashes.iter().enumerate() {
			assert!(
				!leaves.contains(gap_hash),
				"Gap sync block #{} should not be a leaf, but it is",
				i + 1,
			);
		}
		// Block #20 should still be a leaf.
		assert!(leaves.contains(&block20_hash));
		// Disconnected proof blocks must still not be leaves.
		assert!(!leaves.contains(&_proof5_hash));
		assert!(!leaves.contains(&_proof10_hash));
		assert!(!leaves.contains(&_proof15_hash));

		// --- Step 5: verify displaced_leaves_after_finalizing works cleanly ---
		// Call it for block #20 to verify no disconnected proof blocks appear
		// in the displaced list and it completes without errors.
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(
					block20_hash,
					20,
					H256::from([19; 32]), // parent hash of block #20
				)
				.unwrap();
			// Disconnected proof blocks were never leaves, so they must not
			// appear in displaced_leaves.
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof5_hash),);
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof10_hash),);
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof15_hash),);
			// None of the gap sync blocks should be displaced leaves either
			// (they were never added as leaves).
			for gap_hash in &gap_hashes {
				assert!(!displaced.displaced_leaves.iter().any(|(_, h)| h == gap_hash),);
			}
		}
	}
3698
3699	#[test]
3700	fn displaced_leaves_after_finalizing_works() {
3701		let backend = Backend::<Block>::new_test(1000, 100);
3702		let blockchain = backend.blockchain();
3703		let genesis_number = 0;
3704		let genesis_hash =
3705			insert_header(&backend, genesis_number, Default::default(), None, Default::default());
3706
3707		// fork from genesis: 3 prong.
3708		// block 0 -> a1 -> a2 -> a3
3709		//        \
3710		//         -> b1 -> b2 -> c1 -> c2
3711		//              \
3712		//               -> d1 -> d2
3713		let a1_number = 1;
3714		let a1_hash = insert_header(&backend, a1_number, genesis_hash, None, Default::default());
3715		let a2_number = 2;
3716		let a2_hash = insert_header(&backend, a2_number, a1_hash, None, Default::default());
3717		let a3_number = 3;
3718		let a3_hash = insert_header(&backend, a3_number, a2_hash, None, Default::default());
3719
3720		{
3721			let displaced = blockchain
3722				.displaced_leaves_after_finalizing(genesis_hash, genesis_number, Default::default())
3723				.unwrap();
3724			assert_eq!(displaced.displaced_leaves, vec![]);
3725			assert_eq!(displaced.displaced_blocks, vec![]);
3726		}
3727		{
3728			let displaced_a1 = blockchain
3729				.displaced_leaves_after_finalizing(a1_hash, a1_number, genesis_hash)
3730				.unwrap();
3731			assert_eq!(displaced_a1.displaced_leaves, vec![]);
3732			assert_eq!(displaced_a1.displaced_blocks, vec![]);
3733
3734			let displaced_a2 = blockchain
3735				.displaced_leaves_after_finalizing(a2_hash, a2_number, a1_hash)
3736				.unwrap();
3737			assert_eq!(displaced_a2.displaced_leaves, vec![]);
3738			assert_eq!(displaced_a2.displaced_blocks, vec![]);
3739
3740			let displaced_a3 = blockchain
3741				.displaced_leaves_after_finalizing(a3_hash, a3_number, a2_hash)
3742				.unwrap();
3743			assert_eq!(displaced_a3.displaced_leaves, vec![]);
3744			assert_eq!(displaced_a3.displaced_blocks, vec![]);
3745		}
3746		{
3747			// Finalized block is above leaves and not imported yet.
3748			// We will not be able to make a connection,
3749			// nothing can be marked as displaced.
3750			let displaced = blockchain
3751				.displaced_leaves_after_finalizing(H256::from([57; 32]), 10, H256::from([56; 32]))
3752				.unwrap();
3753			assert_eq!(displaced.displaced_leaves, vec![]);
3754			assert_eq!(displaced.displaced_blocks, vec![]);
3755		}
3756
3757		// fork from genesis: 2 prong.
3758		let b1_number = 1;
3759		let b1_hash = insert_header(&backend, b1_number, genesis_hash, None, H256::from([1; 32]));
3760		let b2_number = 2;
3761		let b2_hash = insert_header(&backend, b2_number, b1_hash, None, Default::default());
3762
3763		// fork from b2.
3764		let c1_number = 3;
3765		let c1_hash = insert_header(&backend, c1_number, b2_hash, None, H256::from([2; 32]));
3766		let c2_number = 4;
3767		let c2_hash = insert_header(&backend, c2_number, c1_hash, None, Default::default());
3768
3769		// fork from b1.
3770		let d1_number = 2;
3771		let d1_hash = insert_header(&backend, d1_number, b1_hash, None, H256::from([3; 32]));
3772		let d2_number = 3;
3773		let d2_hash = insert_header(&backend, d2_number, d1_hash, None, Default::default());
3774
3775		{
3776			let displaced_a1 = blockchain
3777				.displaced_leaves_after_finalizing(a1_hash, a1_number, genesis_hash)
3778				.unwrap();
3779			assert_eq!(
3780				displaced_a1.displaced_leaves,
3781				vec![(c2_number, c2_hash), (d2_number, d2_hash)]
3782			);
3783			let mut displaced_blocks = vec![b1_hash, b2_hash, c1_hash, c2_hash, d1_hash, d2_hash];
3784			displaced_blocks.sort();
3785			assert_eq!(displaced_a1.displaced_blocks, displaced_blocks);
3786
3787			let displaced_a2 = blockchain
3788				.displaced_leaves_after_finalizing(a2_hash, a2_number, a1_hash)
3789				.unwrap();
3790			assert_eq!(displaced_a1.displaced_leaves, displaced_a2.displaced_leaves);
3791			assert_eq!(displaced_a1.displaced_blocks, displaced_a2.displaced_blocks);
3792
3793			let displaced_a3 = blockchain
3794				.displaced_leaves_after_finalizing(a3_hash, a3_number, a2_hash)
3795				.unwrap();
3796			assert_eq!(displaced_a1.displaced_leaves, displaced_a3.displaced_leaves);
3797			assert_eq!(displaced_a1.displaced_blocks, displaced_a3.displaced_blocks);
3798		}
3799		{
3800			let displaced = blockchain
3801				.displaced_leaves_after_finalizing(b1_hash, b1_number, genesis_hash)
3802				.unwrap();
3803			assert_eq!(displaced.displaced_leaves, vec![(a3_number, a3_hash)]);
3804			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash];
3805			displaced_blocks.sort();
3806			assert_eq!(displaced.displaced_blocks, displaced_blocks);
3807		}
3808		{
3809			let displaced = blockchain
3810				.displaced_leaves_after_finalizing(b2_hash, b2_number, b1_hash)
3811				.unwrap();
3812			assert_eq!(
3813				displaced.displaced_leaves,
3814				vec![(a3_number, a3_hash), (d2_number, d2_hash)]
3815			);
3816			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash];
3817			displaced_blocks.sort();
3818			assert_eq!(displaced.displaced_blocks, displaced_blocks);
3819		}
3820		{
3821			let displaced = blockchain
3822				.displaced_leaves_after_finalizing(c2_hash, c2_number, c1_hash)
3823				.unwrap();
3824			assert_eq!(
3825				displaced.displaced_leaves,
3826				vec![(a3_number, a3_hash), (d2_number, d2_hash)]
3827			);
3828			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash];
3829			displaced_blocks.sort();
3830			assert_eq!(displaced.displaced_blocks, displaced_blocks);
3831		}
3832	}
3833
3834	#[test]
3835	fn test_tree_route_regression() {
3836		// NOTE: this is a test for a regression introduced in #3665, the result
3837		// of tree_route would be erroneously computed, since it was taking into
3838		// account the `ancestor` in `CachedHeaderMetadata` for the comparison.
3839		// in this test we simulate the same behavior with the side-effect
3840		// triggering the issue being eviction of a previously fetched record
3841		// from the cache, therefore this test is dependent on the LRU cache
3842		// size for header metadata, which is currently set to 5000 elements.
3843		let backend = Backend::<Block>::new_test(10000, 10000);
3844		let blockchain = backend.blockchain();
3845
3846		let genesis = insert_header(&backend, 0, Default::default(), None, Default::default());
3847
3848		let block100 = (1..=100).fold(genesis, |parent, n| {
3849			insert_header(&backend, n, parent, None, Default::default())
3850		});
3851
3852		let block7000 = (101..=7000).fold(block100, |parent, n| {
3853			insert_header(&backend, n, parent, None, Default::default())
3854		});
3855
3856		// This will cause the ancestor of `block100` to be set to `genesis` as a side-effect.
3857		lowest_common_ancestor(blockchain, genesis, block100).unwrap();
3858
3859		// While traversing the tree we will have to do 6900 calls to
3860		// `header_metadata`, which will make sure we will exhaust our cache
3861		// which only takes 5000 elements. In particular, the `CachedHeaderMetadata` struct for
3862		// block #100 will be evicted and will get a new value (with ancestor set to its parent).
3863		let tree_route = tree_route(blockchain, block100, block7000).unwrap();
3864
3865		assert!(tree_route.retracted().is_empty());
3866	}
3867
3868	#[test]
3869	fn test_leaves_with_complex_block_tree() {
3870		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3871			Arc::new(Backend::new_test(20, 20));
3872		substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend);
3873	}
3874
3875	#[test]
3876	fn test_children_with_complex_block_tree() {
3877		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3878			Arc::new(Backend::new_test(20, 20));
3879		substrate_test_runtime_client::trait_tests::test_children_for_backend(backend);
3880	}
3881
3882	#[test]
3883	fn test_blockchain_query_by_number_gets_canonical() {
3884		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3885			Arc::new(Backend::new_test(20, 20));
3886		substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(
3887			backend,
3888		);
3889	}
3890
3891	#[test]
3892	fn test_leaves_pruned_on_finality() {
3893		//   / 1b - 2b - 3b
3894		// 0 - 1a - 2a
3895		//   \ 1c
3896		let backend: Backend<Block> = Backend::new_test(10, 10);
3897		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3898
3899		let block1_a = insert_header(&backend, 1, block0, None, Default::default());
3900		let block1_b = insert_header(&backend, 1, block0, None, [1; 32].into());
3901		let block1_c = insert_header(&backend, 1, block0, None, [2; 32].into());
3902
3903		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block1_a, block1_b, block1_c]);
3904
3905		let block2_a = insert_header(&backend, 2, block1_a, None, Default::default());
3906		let block2_b = insert_header(&backend, 2, block1_b, None, Default::default());
3907
3908		let block3_b = insert_header(&backend, 3, block2_b, None, [3; 32].into());
3909
3910		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block3_b, block2_a, block1_c]);
3911
3912		backend.finalize_block(block1_a, None).unwrap();
3913		backend.finalize_block(block2_a, None).unwrap();
3914
3915		// All leaves are pruned that are known to not belong to canonical branch
3916		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
3917	}
3918
3919	#[test]
3920	fn test_aux() {
3921		let backend: Backend<substrate_test_runtime_client::runtime::Block> =
3922			Backend::new_test(0, 0);
3923		assert!(backend.get_aux(b"test").unwrap().is_none());
3924		backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap();
3925		assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]);
3926		backend.insert_aux(&[], &[&b"test"[..]]).unwrap();
3927		assert!(backend.get_aux(b"test").unwrap().is_none());
3928	}
3929
3930	#[test]
3931	fn test_finalize_block_with_justification() {
3932		use sc_client_api::blockchain::Backend as BlockChainBackend;
3933
3934		let backend = Backend::<Block>::new_test(10, 10);
3935
3936		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3937		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3938
3939		let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3]));
3940		backend.finalize_block(block1, justification.clone()).unwrap();
3941
3942		assert_eq!(
3943			backend.blockchain().justifications(block1).unwrap(),
3944			justification.map(Justifications::from),
3945		);
3946	}
3947
3948	#[test]
3949	fn test_append_justification_to_finalized_block() {
3950		use sc_client_api::blockchain::Backend as BlockChainBackend;
3951
3952		let backend = Backend::<Block>::new_test(10, 10);
3953
3954		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3955		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3956
3957		let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]);
3958		backend.finalize_block(block1, Some(just0.clone().into())).unwrap();
3959
3960		let just1 = (CONS1_ENGINE_ID, vec![4, 5]);
3961		backend.append_justification(block1, just1.clone()).unwrap();
3962
3963		let just2 = (CONS1_ENGINE_ID, vec![6, 7]);
3964		assert!(matches!(
3965			backend.append_justification(block1, just2),
3966			Err(ClientError::BadJustification(_))
3967		));
3968
3969		let justifications = {
3970			let mut just = Justifications::from(just0);
3971			just.append(just1);
3972			just
3973		};
3974		assert_eq!(backend.blockchain().justifications(block1).unwrap(), Some(justifications),);
3975	}
3976
3977	#[test]
3978	fn test_finalize_multiple_blocks_in_single_op() {
3979		let backend = Backend::<Block>::new_test(10, 10);
3980
3981		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3982		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3983		let block2 = insert_header(&backend, 2, block1, None, Default::default());
3984		let block3 = insert_header(&backend, 3, block2, None, Default::default());
3985		let block4 = insert_header(&backend, 4, block3, None, Default::default());
3986		{
3987			let mut op = backend.begin_operation().unwrap();
3988			backend.begin_state_operation(&mut op, block0).unwrap();
3989			op.mark_finalized(block1, None).unwrap();
3990			op.mark_finalized(block2, None).unwrap();
3991			backend.commit_operation(op).unwrap();
3992		}
3993		{
3994			let mut op = backend.begin_operation().unwrap();
3995			backend.begin_state_operation(&mut op, block2).unwrap();
3996			op.mark_finalized(block3, None).unwrap();
3997			op.mark_finalized(block4, None).unwrap();
3998			backend.commit_operation(op).unwrap();
3999		}
4000	}
4001
	#[test]
	fn storage_hash_is_cached_correctly() {
		// A child block that overwrites a storage key must report that key's
		// new storage hash, even after the parent's hash was queried first
		// (which presumably primes the storage-hash cache — TODO confirm
		// against the state backend's caching layer).
		let state_version = StateVersion::default();
		let backend = Backend::<Block>::new_test(10, 10);

		// Import block 0 (as best) with storage `test -> test`, computing the
		// state root up-front so the header matches the committed state.
		let hash0 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, Default::default()).unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(b"test".to_vec(), b"test".to_vec())];

			// Derive the state root from the genesis storage before hashing
			// the header.
			header.state_root = op
				.old_state
				.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.reset_storage(
				Storage {
					top: storage.into_iter().collect(),
					children_default: Default::default(),
				},
				state_version,
			)
			.unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			hash
		};

		// Query the storage hash of `test` at block 0.
		let block0_hash = backend
			.state_at(hash0, TrieCacheContext::Untrusted)
			.unwrap()
			.storage_hash(&b"test"[..])
			.unwrap();

		// Import block 1 — initially as a non-best block — overwriting the
		// key with `test -> test2`.
		let hash1 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hash0).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash0,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))];

			let (root, overlay) = op.old_state.storage_root(
				storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
				state_version,
			);
			op.update_db_storage(overlay).unwrap();
			header.state_root = root.into();
			let hash = header.hash();

			op.update_storage(storage, Vec::new()).unwrap();
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Normal, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			hash
		};

		// Re-import block 1's header, this time marking it as best.
		{
			let header = backend.blockchain().header(hash1).unwrap().unwrap();
			let mut op = backend.begin_operation().unwrap();
			op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
			backend.commit_operation(op).unwrap();
		}

		// The hash at block 1 must reflect the overwritten value rather than
		// a stale entry left over from the block-0 query.
		let block1_hash = backend
			.state_at(hash1, TrieCacheContext::Untrusted)
			.unwrap()
			.storage_hash(&b"test"[..])
			.unwrap();

		assert_ne!(block0_hash, block1_hash);
	}
4094
4095	#[test]
4096	fn test_finalize_non_sequential() {
4097		let backend = Backend::<Block>::new_test(10, 10);
4098
4099		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4100		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4101		let block2 = insert_header(&backend, 2, block1, None, Default::default());
4102		{
4103			let mut op = backend.begin_operation().unwrap();
4104			backend.begin_state_operation(&mut op, block0).unwrap();
4105			op.mark_finalized(block2, None).unwrap();
4106			backend.commit_operation(op).unwrap_err();
4107		}
4108	}
4109
4110	#[test]
4111	fn prune_blocks_on_finalize() {
4112		let pruning_modes =
4113			vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll];
4114
4115		for pruning_mode in pruning_modes {
4116			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 0);
4117			let mut blocks = Vec::new();
4118			let mut prev_hash = Default::default();
4119			for i in 0..5 {
4120				let hash = insert_block(
4121					&backend,
4122					i,
4123					prev_hash,
4124					None,
4125					Default::default(),
4126					vec![UncheckedXt::new_transaction(i.into(), ())],
4127					None,
4128				)
4129				.unwrap();
4130				blocks.push(hash);
4131				prev_hash = hash;
4132			}
4133
4134			{
4135				let mut op = backend.begin_operation().unwrap();
4136				backend.begin_state_operation(&mut op, blocks[4]).unwrap();
4137				for i in 1..5 {
4138					op.mark_finalized(blocks[i], None).unwrap();
4139				}
4140				backend.commit_operation(op).unwrap();
4141			}
4142			let bc = backend.blockchain();
4143
4144			if matches!(pruning_mode, BlocksPruning::Some(_)) {
4145				assert_eq!(None, bc.body(blocks[0]).unwrap());
4146				assert_eq!(None, bc.body(blocks[1]).unwrap());
4147				assert_eq!(None, bc.body(blocks[2]).unwrap());
4148				assert_eq!(
4149					Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
4150					bc.body(blocks[3]).unwrap()
4151				);
4152				assert_eq!(
4153					Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
4154					bc.body(blocks[4]).unwrap()
4155				);
4156			} else {
4157				for i in 0..5 {
4158					assert_eq!(
4159						Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]),
4160						bc.body(blocks[i]).unwrap()
4161					);
4162				}
4163			}
4164		}
4165	}
4166
	#[test]
	fn prune_blocks_on_finalize_with_fork() {
		// Body pruning in the presence of a fork: finalizing the canonical
		// chain must also prune the displaced fork bodies (except under
		// `KeepAll`), while headers/hashes remain available for all numbers.
		sp_tracing::try_init_simple();

		let pruning_modes =
			vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll];

		for pruning in pruning_modes {
			let backend = Backend::<Block>::new_test_with_tx_storage(pruning, 10);

			// Build the canonical five-block chain, one extrinsic per block.
			let mut blocks = Vec::new();
			let mut prev_hash = Default::default();
			for i in 0..5 {
				let hash = insert_block(
					&backend,
					i,
					prev_hash,
					None,
					Default::default(),
					vec![UncheckedXt::new_transaction(i.into(), ())],
					None,
				)
				.unwrap();
				blocks.push(hash);
				prev_hash = hash;
			}

			// Insert a two-block fork branching off at block 2.
			let fork_hash_root = insert_block(
				&backend,
				2,
				blocks[1],
				None,
				H256::random(),
				vec![UncheckedXt::new_transaction(2.into(), ())],
				None,
			)
			.unwrap();
			insert_block(
				&backend,
				3,
				fork_hash_root,
				None,
				H256::random(),
				vec![
					UncheckedXt::new_transaction(3.into(), ()),
					UncheckedXt::new_transaction(11.into(), ()),
				],
				None,
			)
			.unwrap();

			// Make the canonical tip the head.
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_head(blocks[4]).unwrap();
			backend.commit_operation(op).unwrap();

			// Before finalization the fork body is still available.
			let bc = backend.blockchain();
			assert_eq!(
				Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
				bc.body(fork_hash_root).unwrap()
			);

			// Finalize the canonical chain one block per operation.
			for i in 1..5 {
				let mut op = backend.begin_operation().unwrap();
				backend.begin_state_operation(&mut op, blocks[4]).unwrap();
				op.mark_finalized(blocks[i], None).unwrap();
				backend.commit_operation(op).unwrap();
			}

			if matches!(pruning, BlocksPruning::Some(_)) {
				// Pruning window of 2: only the last two finalized bodies
				// survive on the canonical chain.
				assert_eq!(None, bc.body(blocks[0]).unwrap());
				assert_eq!(None, bc.body(blocks[1]).unwrap());
				assert_eq!(None, bc.body(blocks[2]).unwrap());

				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
					bc.body(blocks[3]).unwrap()
				);
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
					bc.body(blocks[4]).unwrap()
				);
			} else {
				// `KeepFinalized` / `KeepAll`: all canonical bodies remain.
				for i in 0..5 {
					assert_eq!(
						Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]),
						bc.body(blocks[i]).unwrap()
					);
				}
			}

			// The displaced fork body is removed unless everything is kept.
			if matches!(pruning, BlocksPruning::KeepAll) {
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
					bc.body(fork_hash_root).unwrap()
				);
			} else {
				assert_eq!(None, bc.body(fork_hash_root).unwrap());
			}

			// Hash-by-number lookups stay intact for the canonical chain.
			assert_eq!(bc.info().best_number, 4);
			for i in 0..5 {
				assert!(bc.hash(i).unwrap().is_some());
			}
		}
	}
4272
4273	#[test]
4274	fn prune_blocks_on_finalize_and_reorg() {
4275		// 	0 - 1b
4276		// 	\ - 1a - 2a - 3a
4277		// 	     \ - 2b
4278
4279		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);
4280
4281		let make_block = |index, parent, val: u64| {
4282			insert_block(
4283				&backend,
4284				index,
4285				parent,
4286				None,
4287				H256::random(),
4288				vec![UncheckedXt::new_transaction(val.into(), ())],
4289				None,
4290			)
4291			.unwrap()
4292		};
4293
4294		let block_0 = make_block(0, Default::default(), 0x00);
4295		let block_1a = make_block(1, block_0, 0x1a);
4296		let block_1b = make_block(1, block_0, 0x1b);
4297		let block_2a = make_block(2, block_1a, 0x2a);
4298		let block_2b = make_block(2, block_1a, 0x2b);
4299		let block_3a = make_block(3, block_2a, 0x3a);
4300
4301		// Make sure 1b is head
4302		let mut op = backend.begin_operation().unwrap();
4303		backend.begin_state_operation(&mut op, block_0).unwrap();
4304		op.mark_head(block_1b).unwrap();
4305		backend.commit_operation(op).unwrap();
4306
4307		// Finalize 3a
4308		let mut op = backend.begin_operation().unwrap();
4309		backend.begin_state_operation(&mut op, block_0).unwrap();
4310		op.mark_head(block_3a).unwrap();
4311		op.mark_finalized(block_1a, None).unwrap();
4312		op.mark_finalized(block_2a, None).unwrap();
4313		op.mark_finalized(block_3a, None).unwrap();
4314		backend.commit_operation(op).unwrap();
4315
4316		let bc = backend.blockchain();
4317		assert_eq!(None, bc.body(block_1b).unwrap());
4318		assert_eq!(None, bc.body(block_2b).unwrap());
4319		assert_eq!(
4320			Some(vec![UncheckedXt::new_transaction(0x00.into(), ())]),
4321			bc.body(block_0).unwrap()
4322		);
4323		assert_eq!(
4324			Some(vec![UncheckedXt::new_transaction(0x1a.into(), ())]),
4325			bc.body(block_1a).unwrap()
4326		);
4327		assert_eq!(
4328			Some(vec![UncheckedXt::new_transaction(0x2a.into(), ())]),
4329			bc.body(block_2a).unwrap()
4330		);
4331		assert_eq!(
4332			Some(vec![UncheckedXt::new_transaction(0x3a.into(), ())]),
4333			bc.body(block_3a).unwrap()
4334		);
4335	}
4336
4337	#[test]
4338	fn indexed_data_block_body() {
4339		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
4340
4341		let x0 = UncheckedXt::new_transaction(0.into(), ()).encode();
4342		let x1 = UncheckedXt::new_transaction(1.into(), ()).encode();
4343		let x0_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x0[1..]);
4344		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
4345		let index = vec![
4346			IndexOperation::Insert {
4347				extrinsic: 0,
4348				hash: x0_hash.as_ref().to_vec(),
4349				size: (x0.len() - 1) as u32,
4350			},
4351			IndexOperation::Insert {
4352				extrinsic: 1,
4353				hash: x1_hash.as_ref().to_vec(),
4354				size: (x1.len() - 1) as u32,
4355			},
4356		];
4357		let hash = insert_block(
4358			&backend,
4359			0,
4360			Default::default(),
4361			None,
4362			Default::default(),
4363			vec![
4364				UncheckedXt::new_transaction(0.into(), ()),
4365				UncheckedXt::new_transaction(1.into(), ()),
4366			],
4367			Some(index),
4368		)
4369		.unwrap();
4370		let bc = backend.blockchain();
4371		assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[1..]);
4372		assert_eq!(bc.indexed_transaction(x1_hash).unwrap().unwrap(), &x1[1..]);
4373
4374		let hashof0 = bc.info().genesis_hash;
4375		// Push one more blocks and make sure block is pruned and transaction index is cleared.
4376		let block1 =
4377			insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap();
4378		backend.finalize_block(block1, None).unwrap();
4379		assert_eq!(bc.body(hashof0).unwrap(), None);
4380		assert_eq!(bc.indexed_transaction(x0_hash).unwrap(), None);
4381		assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None);
4382	}
4383
4384	#[test]
4385	fn index_invalid_size() {
4386		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
4387
4388		let x0 = UncheckedXt::new_transaction(0.into(), ()).encode();
4389		let x1 = UncheckedXt::new_transaction(1.into(), ()).encode();
4390
4391		let x0_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x0[..]);
4392		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[..]);
4393		let index = vec![
4394			IndexOperation::Insert {
4395				extrinsic: 0,
4396				hash: x0_hash.as_ref().to_vec(),
4397				size: (x0.len()) as u32,
4398			},
4399			IndexOperation::Insert {
4400				extrinsic: 1,
4401				hash: x1_hash.as_ref().to_vec(),
4402				size: (x1.len() + 1) as u32,
4403			},
4404		];
4405		insert_block(
4406			&backend,
4407			0,
4408			Default::default(),
4409			None,
4410			Default::default(),
4411			vec![
4412				UncheckedXt::new_transaction(0.into(), ()),
4413				UncheckedXt::new_transaction(1.into(), ()),
4414			],
4415			Some(index),
4416		)
4417		.unwrap();
4418		let bc = backend.blockchain();
4419		assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[..]);
4420		assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None);
4421	}
4422
4423	#[test]
4424	fn renew_transaction_storage() {
4425		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
4426		let mut blocks = Vec::new();
4427		let mut prev_hash = Default::default();
4428		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
4429		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
4430		for i in 0..10 {
4431			let mut index = Vec::new();
4432			if i == 0 {
4433				index.push(IndexOperation::Insert {
4434					extrinsic: 0,
4435					hash: x1_hash.as_ref().to_vec(),
4436					size: (x1.len() - 1) as u32,
4437				});
4438			} else if i < 5 {
4439				// keep renewing 1st
4440				index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
4441			} // else stop renewing
4442			let hash = insert_block(
4443				&backend,
4444				i,
4445				prev_hash,
4446				None,
4447				Default::default(),
4448				vec![UncheckedXt::new_transaction(i.into(), ())],
4449				Some(index),
4450			)
4451			.unwrap();
4452			blocks.push(hash);
4453			prev_hash = hash;
4454		}
4455
4456		for i in 1..10 {
4457			let mut op = backend.begin_operation().unwrap();
4458			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
4459			op.mark_finalized(blocks[i], None).unwrap();
4460			backend.commit_operation(op).unwrap();
4461			let bc = backend.blockchain();
4462			if i < 6 {
4463				assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());
4464			} else {
4465				assert!(bc.indexed_transaction(x1_hash).unwrap().is_none());
4466			}
4467		}
4468	}
4469
	#[test]
	fn remove_leaf_block_works() {
		// `remove_leaf_block` must refuse to remove the best block, and on
		// success must drop the block's state, header, leaf entry and child
		// link.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);

		// Chain: blocks[0] (genesis) - blocks[1].
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();
		for i in 0..2 {
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		// Two sibling children of blocks[1] at number 2: blocks[2], blocks[3]
		// (distinct extrinsics + random extrinsics root keep them distinct).
		for i in 0..2 {
			let hash = insert_block(
				&backend,
				2,
				blocks[1],
				None,
				sp_core::H256::random(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);
		}

		// Insert a fork at block 1, which becomes the best block.
		let best_hash = insert_block(
			&backend,
			1,
			blocks[0],
			None,
			sp_core::H256::random(),
			vec![UncheckedXt::new_transaction(42.into(), ())],
			None,
		)
		.unwrap();

		// Removing the current best block is rejected.
		assert_eq!(backend.blockchain().info().best_hash, best_hash);
		assert!(backend.remove_leaf_block(best_hash).is_err());

		assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]);
		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]);

		// Remove leaf blocks[3]: state, header, leaf and child entry all go.
		assert!(backend.have_state_at(blocks[3], 2));
		assert!(backend.blockchain().header(blocks[3]).unwrap().is_some());
		backend.remove_leaf_block(blocks[3]).unwrap();
		assert!(!backend.have_state_at(blocks[3], 2));
		assert!(backend.blockchain().header(blocks[3]).unwrap().is_none());
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]);
		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]);

		// Remove leaf blocks[2] likewise; blocks[1] becomes a leaf again.
		assert!(backend.have_state_at(blocks[2], 2));
		assert!(backend.blockchain().header(blocks[2]).unwrap().is_some());
		backend.remove_leaf_block(blocks[2]).unwrap();
		assert!(!backend.have_state_at(blocks[2], 2));
		assert!(backend.blockchain().header(blocks[2]).unwrap().is_none());
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]);
		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]);

		// Remove the now-leaf blocks[1]; only the best fork remains.
		assert!(backend.have_state_at(blocks[1], 1));
		assert!(backend.blockchain().header(blocks[1]).unwrap().is_some());
		backend.remove_leaf_block(blocks[1]).unwrap();
		assert!(!backend.have_state_at(blocks[1], 1));
		assert!(backend.blockchain().header(blocks[1]).unwrap().is_none());
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]);
		assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]);
	}
4546
4547	#[test]
4548	fn test_import_existing_block_as_new_head() {
4549		let backend: Backend<Block> = Backend::new_test(10, 3);
4550		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4551		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4552		let block2 = insert_header(&backend, 2, block1, None, Default::default());
4553		let block3 = insert_header(&backend, 3, block2, None, Default::default());
4554		let block4 = insert_header(&backend, 4, block3, None, Default::default());
4555		let block5 = insert_header(&backend, 5, block4, None, Default::default());
4556		assert_eq!(backend.blockchain().info().best_hash, block5);
4557
4558		// Insert 1 as best again. This should fail because canonicalization_delay == 3 and best ==
4559		// 5
4560		let header = Header {
4561			number: 1,
4562			parent_hash: block0,
4563			state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4564			digest: Default::default(),
4565			extrinsics_root: Default::default(),
4566		};
4567		let mut op = backend.begin_operation().unwrap();
4568		op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
4569		assert!(matches!(backend.commit_operation(op), Err(sp_blockchain::Error::SetHeadTooOld)));
4570
4571		// Insert 2 as best again.
4572		let header = backend.blockchain().header(block2).unwrap().unwrap();
4573		let mut op = backend.begin_operation().unwrap();
4574		op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
4575		backend.commit_operation(op).unwrap();
4576		assert_eq!(backend.blockchain().info().best_hash, block2);
4577	}
4578
4579	#[test]
4580	fn test_import_existing_block_as_final() {
4581		let backend: Backend<Block> = Backend::new_test(10, 10);
4582		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4583		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4584		let _block2 = insert_header(&backend, 2, block1, None, Default::default());
4585		// Genesis is auto finalized, the rest are not.
4586		assert_eq!(backend.blockchain().info().finalized_hash, block0);
4587
4588		// Insert 1 as final again.
4589		let header = backend.blockchain().header(block1).unwrap().unwrap();
4590
4591		let mut op = backend.begin_operation().unwrap();
4592		op.set_block_data(header, None, None, None, NewBlockState::Final, true).unwrap();
4593		backend.commit_operation(op).unwrap();
4594
4595		assert_eq!(backend.blockchain().info().finalized_hash, block1);
4596	}
4597
4598	#[test]
4599	fn test_import_existing_state_fails() {
4600		let backend: Backend<Block> = Backend::new_test(10, 10);
4601		let genesis =
4602			insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
4603				.unwrap();
4604
4605		insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();
4606		let err = insert_block(&backend, 1, genesis, None, Default::default(), vec![], None)
4607			.err()
4608			.unwrap();
4609		match err {
4610			sp_blockchain::Error::StateDatabase(m) if m == "Block already exists" => (),
4611			e @ _ => panic!("Unexpected error {:?}", e),
4612		}
4613	}
4614
4615	#[test]
4616	fn test_leaves_not_created_for_ancient_blocks() {
4617		let backend: Backend<Block> = Backend::new_test(10, 10);
4618		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4619
4620		let block1_a = insert_header(&backend, 1, block0, None, Default::default());
4621		let block2_a = insert_header(&backend, 2, block1_a, None, Default::default());
4622		backend.finalize_block(block1_a, None).unwrap();
4623		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
4624
4625		// Insert a fork prior to finalization point. Leave should not be created.
4626		insert_header_no_head(&backend, 1, block0, [1; 32].into());
4627		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
4628	}
4629
4630	#[test]
4631	fn revert_non_best_blocks() {
4632		let backend = Backend::<Block>::new_test(10, 10);
4633
4634		let genesis =
4635			insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
4636				.unwrap();
4637
4638		let block1 =
4639			insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();
4640
4641		let block2 =
4642			insert_block(&backend, 2, block1, None, Default::default(), vec![], None).unwrap();
4643
4644		let block3 = {
4645			let mut op = backend.begin_operation().unwrap();
4646			backend.begin_state_operation(&mut op, block1).unwrap();
4647			let header = Header {
4648				number: 3,
4649				parent_hash: block2,
4650				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4651				digest: Default::default(),
4652				extrinsics_root: Default::default(),
4653			};
4654
4655			op.set_block_data(
4656				header.clone(),
4657				Some(Vec::new()),
4658				None,
4659				None,
4660				NewBlockState::Normal,
4661				true,
4662			)
4663			.unwrap();
4664
4665			backend.commit_operation(op).unwrap();
4666
4667			header.hash()
4668		};
4669
4670		let block4 = {
4671			let mut op = backend.begin_operation().unwrap();
4672			backend.begin_state_operation(&mut op, block2).unwrap();
4673			let header = Header {
4674				number: 4,
4675				parent_hash: block3,
4676				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4677				digest: Default::default(),
4678				extrinsics_root: Default::default(),
4679			};
4680
4681			op.set_block_data(
4682				header.clone(),
4683				Some(Vec::new()),
4684				None,
4685				None,
4686				NewBlockState::Normal,
4687				true,
4688			)
4689			.unwrap();
4690
4691			backend.commit_operation(op).unwrap();
4692
4693			header.hash()
4694		};
4695
4696		let block3_fork = {
4697			let mut op = backend.begin_operation().unwrap();
4698			backend.begin_state_operation(&mut op, block2).unwrap();
4699			let header = Header {
4700				number: 3,
4701				parent_hash: block2,
4702				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4703				digest: Default::default(),
4704				extrinsics_root: H256::from_low_u64_le(42),
4705			};
4706
4707			op.set_block_data(
4708				header.clone(),
4709				Some(Vec::new()),
4710				None,
4711				None,
4712				NewBlockState::Normal,
4713				true,
4714			)
4715			.unwrap();
4716
4717			backend.commit_operation(op).unwrap();
4718
4719			header.hash()
4720		};
4721
4722		assert!(backend.have_state_at(block1, 1));
4723		assert!(backend.have_state_at(block2, 2));
4724		assert!(backend.have_state_at(block3, 3));
4725		assert!(backend.have_state_at(block4, 4));
4726		assert!(backend.have_state_at(block3_fork, 3));
4727
4728		assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]);
4729		assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0);
4730
4731		assert_eq!(3, backend.revert(1, false).unwrap().0);
4732
4733		assert!(backend.have_state_at(block1, 1));
4734
4735		let ensure_pruned = |hash, number: u32| {
4736			assert_eq!(
4737				backend.blockchain.status(hash).unwrap(),
4738				sc_client_api::blockchain::BlockStatus::Unknown
4739			);
4740			assert!(
4741				backend
4742					.blockchain
4743					.db
4744					.get(columns::BODY, &number_and_hash_to_lookup_key(number, hash).unwrap())
4745					.is_none(),
4746				"{number}"
4747			);
4748			assert!(
4749				backend
4750					.blockchain
4751					.db
4752					.get(columns::HEADER, &number_and_hash_to_lookup_key(number, hash).unwrap())
4753					.is_none(),
4754				"{number}"
4755			);
4756		};
4757
4758		ensure_pruned(block2, 2);
4759		ensure_pruned(block3, 3);
4760		ensure_pruned(block4, 4);
4761		ensure_pruned(block3_fork, 3);
4762
4763		assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]);
4764		assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0);
4765	}
4766
4767	#[test]
4768	fn revert_finalized_blocks() {
4769		let pruning_modes = [BlocksPruning::Some(10), BlocksPruning::KeepAll];
4770
4771		// we will create a chain with 11 blocks, finalize block #8 and then
4772		// attempt to revert 5 blocks.
4773		for pruning_mode in pruning_modes {
4774			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);
4775
4776			let mut parent = Default::default();
4777			for i in 0..=10 {
4778				parent = insert_block(&backend, i, parent, None, Default::default(), vec![], None)
4779					.unwrap();
4780			}
4781
4782			assert_eq!(backend.blockchain().info().best_number, 10);
4783
4784			let block8 = backend.blockchain().hash(8).unwrap().unwrap();
4785			backend.finalize_block(block8, None).unwrap();
4786			backend.revert(5, true).unwrap();
4787
4788			match pruning_mode {
4789				// we can only revert to blocks for which we have state, if pruning is enabled
4790				// then the last state available will be that of the latest finalized block
4791				BlocksPruning::Some(_) => {
4792					assert_eq!(backend.blockchain().info().finalized_number, 8)
4793				},
4794				// otherwise if we're not doing state pruning we can revert past finalized blocks
4795				_ => assert_eq!(backend.blockchain().info().finalized_number, 5),
4796			}
4797		}
4798	}
4799
4800	#[test]
4801	fn test_no_duplicated_leaves_allowed() {
4802		let backend: Backend<Block> = Backend::new_test(10, 10);
4803		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4804		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4805		// Add block 2 not as the best block
4806		let block2 = insert_header_no_head(&backend, 2, block1, Default::default());
4807		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
4808		assert_eq!(backend.blockchain().info().best_hash, block1);
4809
4810		// Add block 2 as the best block
4811		let block2 = insert_header(&backend, 2, block1, None, Default::default());
4812		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
4813		assert_eq!(backend.blockchain().info().best_hash, block2);
4814	}
4815
4816	#[test]
4817	fn force_delayed_canonicalize_waiting_for_blocks_to_be_finalized() {
4818		let pruning_modes =
4819			[BlocksPruning::Some(10), BlocksPruning::KeepAll, BlocksPruning::KeepFinalized];
4820
4821		for pruning_mode in pruning_modes {
4822			eprintln!("Running with pruning mode: {:?}", pruning_mode);
4823
4824			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);
4825
4826			let genesis = insert_block(
4827				&backend,
4828				0,
4829				Default::default(),
4830				None,
4831				Default::default(),
4832				vec![],
4833				None,
4834			)
4835			.unwrap();
4836
4837			let block1 = {
4838				let mut op = backend.begin_operation().unwrap();
4839				backend.begin_state_operation(&mut op, genesis).unwrap();
4840				let mut header = Header {
4841					number: 1,
4842					parent_hash: genesis,
4843					state_root: Default::default(),
4844					digest: Default::default(),
4845					extrinsics_root: Default::default(),
4846				};
4847
4848				let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];
4849
4850				let (root, overlay) = op.old_state.storage_root(
4851					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4852					StateVersion::V1,
4853				);
4854				op.update_db_storage(overlay).unwrap();
4855				header.state_root = root.into();
4856
4857				op.update_storage(storage, Vec::new()).unwrap();
4858
4859				op.set_block_data(
4860					header.clone(),
4861					Some(Vec::new()),
4862					None,
4863					None,
4864					NewBlockState::Normal,
4865					true,
4866				)
4867				.unwrap();
4868
4869				backend.commit_operation(op).unwrap();
4870
4871				header.hash()
4872			};
4873
4874			if matches!(pruning_mode, BlocksPruning::Some(_)) {
4875				assert_eq!(
4876					LastCanonicalized::Block(0),
4877					backend.storage.state_db.last_canonicalized()
4878				);
4879			}
4880
4881			// This should not trigger any forced canonicalization as we didn't have imported any
4882			// best block by now.
4883			let block2 = {
4884				let mut op = backend.begin_operation().unwrap();
4885				backend.begin_state_operation(&mut op, block1).unwrap();
4886				let mut header = Header {
4887					number: 2,
4888					parent_hash: block1,
4889					state_root: Default::default(),
4890					digest: Default::default(),
4891					extrinsics_root: Default::default(),
4892				};
4893
4894				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 2]))];
4895
4896				let (root, overlay) = op.old_state.storage_root(
4897					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4898					StateVersion::V1,
4899				);
4900				op.update_db_storage(overlay).unwrap();
4901				header.state_root = root.into();
4902
4903				op.update_storage(storage, Vec::new()).unwrap();
4904
4905				op.set_block_data(
4906					header.clone(),
4907					Some(Vec::new()),
4908					None,
4909					None,
4910					NewBlockState::Normal,
4911					true,
4912				)
4913				.unwrap();
4914
4915				backend.commit_operation(op).unwrap();
4916
4917				header.hash()
4918			};
4919
4920			if matches!(pruning_mode, BlocksPruning::Some(_)) {
4921				assert_eq!(
4922					LastCanonicalized::Block(0),
4923					backend.storage.state_db.last_canonicalized()
4924				);
4925			}
4926
4927			// This should also not trigger it yet, because we import a best block, but the best
4928			// block from the POV of the db is still at `0`.
4929			let block3 = {
4930				let mut op = backend.begin_operation().unwrap();
4931				backend.begin_state_operation(&mut op, block2).unwrap();
4932				let mut header = Header {
4933					number: 3,
4934					parent_hash: block2,
4935					state_root: Default::default(),
4936					digest: Default::default(),
4937					extrinsics_root: Default::default(),
4938				};
4939
4940				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 3]))];
4941
4942				let (root, overlay) = op.old_state.storage_root(
4943					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4944					StateVersion::V1,
4945				);
4946				op.update_db_storage(overlay).unwrap();
4947				header.state_root = root.into();
4948
4949				op.update_storage(storage, Vec::new()).unwrap();
4950
4951				op.set_block_data(
4952					header.clone(),
4953					Some(Vec::new()),
4954					None,
4955					None,
4956					NewBlockState::Best,
4957					true,
4958				)
4959				.unwrap();
4960
4961				backend.commit_operation(op).unwrap();
4962
4963				header.hash()
4964			};
4965
4966			// Now it should kick in.
4967			let block4 = {
4968				let mut op = backend.begin_operation().unwrap();
4969				backend.begin_state_operation(&mut op, block3).unwrap();
4970				let mut header = Header {
4971					number: 4,
4972					parent_hash: block3,
4973					state_root: Default::default(),
4974					digest: Default::default(),
4975					extrinsics_root: Default::default(),
4976				};
4977
4978				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 4]))];
4979
4980				let (root, overlay) = op.old_state.storage_root(
4981					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4982					StateVersion::V1,
4983				);
4984				op.update_db_storage(overlay).unwrap();
4985				header.state_root = root.into();
4986
4987				op.update_storage(storage, Vec::new()).unwrap();
4988
4989				op.set_block_data(
4990					header.clone(),
4991					Some(Vec::new()),
4992					None,
4993					None,
4994					NewBlockState::Best,
4995					true,
4996				)
4997				.unwrap();
4998
4999				backend.commit_operation(op).unwrap();
5000
5001				header.hash()
5002			};
5003
5004			if matches!(pruning_mode, BlocksPruning::Some(_)) {
5005				assert_eq!(
5006					LastCanonicalized::Block(2),
5007					backend.storage.state_db.last_canonicalized()
5008				);
5009			}
5010
5011			assert_eq!(block1, backend.blockchain().hash(1).unwrap().unwrap());
5012			assert_eq!(block2, backend.blockchain().hash(2).unwrap().unwrap());
5013			assert_eq!(block3, backend.blockchain().hash(3).unwrap().unwrap());
5014			assert_eq!(block4, backend.blockchain().hash(4).unwrap().unwrap());
5015		}
5016	}
5017
	#[test]
	fn test_pinned_blocks_on_finalize() {
		// Pruning window of 1: finalization prunes block data, except for
		// blocks that are pinned or still inside the window.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Justification payload derived from the block number so each block's
		// justification is distinguishable in the assertions below.
		let build_justification = |i: u64| ([0, 0, 0, 0], vec![i.try_into().unwrap()]);
		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4
		for i in 0..5 {
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);
			// Avoid block pruning.
			backend.pin_block(blocks[i as usize]).unwrap();

			prev_hash = hash;
		}

		let bc = backend.blockchain();

		// Check that we can properly access values when there is reference count
		// but no value.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);

		// Block 1 gets pinned three times in total (once in the loop above,
		// twice here), so it needs three unpins before its data may go away.
		backend.pin_block(blocks[1]).unwrap();
		backend.pin_block(blocks[1]).unwrap();

		// Finalize all blocks. This will trigger pruning.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
		for i in 1..5 {
			op.mark_finalized(blocks[i], Some(build_justification(i.try_into().unwrap())))
				.unwrap();
		}
		backend.commit_operation(op).unwrap();

		// Block 0, 1, 2, 3 are pinned, so all values should be cached.
		// Block 4 is inside the pruning window, its value is in db.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0.into(), ())]),
			bc.body(blocks[0]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(1))),
			bc.justifications(blocks[1]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
			bc.body(blocks[2]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(2))),
			bc.justifications(blocks[2]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
			bc.body(blocks[3]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(3))),
			bc.justifications(blocks[3]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);

		// Unpin all blocks once. Values should be removed from cache, except
		// where extra pins are still held.
		for block in &blocks {
			backend.unpin_block(*block);
		}

		assert!(bc.body(blocks[0]).unwrap().is_none());
		// Block 1 still holds two pins (three pins minus one unpin), so we
		// expect it to be still cached.
		assert!(bc.body(blocks[1]).unwrap().is_some());
		assert!(bc.justifications(blocks[1]).unwrap().is_some());
		// Headers should also be available while pinned
		assert!(bc.header(blocks[1]).ok().flatten().is_some());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.justifications(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());
		assert!(bc.justifications(blocks[3]).unwrap().is_none());

		// After these unpins, block 1 should also be removed: the first unpin
		// leaves one pin in place, the second drops the last reference.
		backend.unpin_block(blocks[1]);
		assert!(bc.body(blocks[1]).unwrap().is_some());
		assert!(bc.justifications(blocks[1]).unwrap().is_some());
		backend.unpin_block(blocks[1]);
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.justifications(blocks[1]).unwrap().is_none());

		// Block 4 is inside the pruning window and still kept
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);

		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4 -> 5
		let hash = insert_block(
			&backend,
			5,
			prev_hash,
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(5.into(), ())],
			None,
		)
		.unwrap();
		blocks.push(hash);

		// Pin block 4 before it leaves the pruning window.
		backend.pin_block(blocks[4]).unwrap();
		// Mark block 5 as finalized.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[5]).unwrap();
		op.mark_finalized(blocks[5], Some(build_justification(5))).unwrap();
		backend.commit_operation(op).unwrap();

		// Earlier blocks were already unpinned and pruned.
		assert!(bc.body(blocks[0]).unwrap().is_none());
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());

		// Block 4 is pinned; block 5 is the newest finalized block.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(5.into(), ())]),
			bc.body(blocks[5]).unwrap()
		);
		assert!(bc.header(blocks[5]).ok().flatten().is_some());

		// Dropping block 4's only pin releases its cached data.
		backend.unpin_block(blocks[4]);
		assert!(bc.body(blocks[4]).unwrap().is_none());
		assert!(bc.justifications(blocks[4]).unwrap().is_none());

		// Append a justification to block 5.
		backend.append_justification(blocks[5], ([0, 0, 0, 1], vec![42])).unwrap();

		let hash = insert_block(
			&backend,
			6,
			blocks[5],
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(6.into(), ())],
			None,
		)
		.unwrap();
		blocks.push(hash);

		// Pin block 5 so it gets loaded into the cache on prune
		backend.pin_block(blocks[5]).unwrap();

		// Finalize block 6 so block 5 gets pruned. Since it is pinned both justifications should be
		// in memory.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[6]).unwrap();
		op.mark_finalized(blocks[6], None).unwrap();
		backend.commit_operation(op).unwrap();

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(5.into(), ())]),
			bc.body(blocks[5]).unwrap()
		);
		assert!(bc.header(blocks[5]).ok().flatten().is_some());
		// Both the original justification and the appended one must survive.
		let mut expected = Justifications::from(build_justification(5));
		expected.append(([0, 0, 0, 1], vec![42]));
		assert_eq!(Some(expected), bc.justifications(blocks[5]).unwrap());
	}
5222
	#[test]
	fn test_pinned_blocks_on_finalize_with_fork() {
		// Pruning window of 1: finalization prunes block data, except for
		// blocks that are pinned or still inside the window.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4
		for i in 0..5 {
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);

			// Avoid block pruning.
			backend.pin_block(blocks[i as usize]).unwrap();

			prev_hash = hash;
		}

		// Insert a fork at the second block.
		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4
		//        \ -> 2 -> 3
		// Random state roots give the fork blocks hashes distinct from the
		// canonical blocks at the same heights.
		let fork_hash_root = insert_block(
			&backend,
			2,
			blocks[1],
			None,
			H256::random(),
			vec![UncheckedXt::new_transaction(2.into(), ())],
			None,
		)
		.unwrap();
		let fork_hash_3 = insert_block(
			&backend,
			3,
			fork_hash_root,
			None,
			H256::random(),
			vec![
				UncheckedXt::new_transaction(3.into(), ()),
				UncheckedXt::new_transaction(11.into(), ()),
			],
			None,
		)
		.unwrap();

		// Do not prune the fork hash.
		backend.pin_block(fork_hash_3).unwrap();

		// Make the canonical chain tip the head before finalizing.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
		op.mark_head(blocks[4]).unwrap();
		backend.commit_operation(op).unwrap();

		// Finalize the canonical chain block by block; this triggers pruning of
		// each block leaving the window (and of displaced fork blocks).
		for i in 1..5 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
		}

		// All canonical blocks are pinned, so their bodies remain accessible.
		let bc = backend.blockchain();
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0.into(), ())]),
			bc.body(blocks[0]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
			bc.body(blocks[2]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
			bc.body(blocks[3]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		// Check the fork hashes. The fork root was never pinned, so its body is
		// gone; the pinned fork tip keeps its body.
		assert_eq!(None, bc.body(fork_hash_root).unwrap());
		assert_eq!(
			Some(vec![
				UncheckedXt::new_transaction(3.into(), ()),
				UncheckedXt::new_transaction(11.into(), ())
			]),
			bc.body(fork_hash_3).unwrap()
		);

		// Unpin all blocks, except the forked one.
		for block in &blocks {
			backend.unpin_block(*block);
		}
		assert!(bc.body(blocks[0]).unwrap().is_none());
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());

		// The fork tip stays cached until its last pin is dropped.
		assert!(bc.body(fork_hash_3).unwrap().is_some());
		backend.unpin_block(fork_hash_3);
		assert!(bc.body(fork_hash_3).unwrap().is_none());
	}
5337
5338	#[test]
5339	fn prune_blocks_with_empty_predicates_prunes_all() {
5340		// Test backward compatibility: empty predicates means all blocks are pruned
5341		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
5342			BlocksPruning::Some(2),
5343			0,
5344			vec![], // Empty predicates
5345		);
5346
5347		let mut blocks = Vec::new();
5348		let mut prev_hash = Default::default();
5349
5350		// Create 5 blocks
5351		for i in 0..5 {
5352			let hash = insert_block(
5353				&backend,
5354				i,
5355				prev_hash,
5356				None,
5357				Default::default(),
5358				vec![UncheckedXt::new_transaction(i.into(), ())],
5359				None,
5360			)
5361			.unwrap();
5362			blocks.push(hash);
5363			prev_hash = hash;
5364		}
5365
5366		// Justification - but no predicate to preserve it
5367		let justification = (CONS0_ENGINE_ID, vec![1, 2, 3]);
5368
5369		// Finalize blocks, adding justification to block 1
5370		{
5371			let mut op = backend.begin_operation().unwrap();
5372			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
5373			op.mark_finalized(blocks[1], Some(justification.clone())).unwrap();
5374			op.mark_finalized(blocks[2], None).unwrap();
5375			op.mark_finalized(blocks[3], None).unwrap();
5376			op.mark_finalized(blocks[4], None).unwrap();
5377			backend.commit_operation(op).unwrap();
5378		}
5379
5380		let bc = backend.blockchain();
5381
5382		// All blocks outside pruning window should be pruned, even with justification
5383		assert_eq!(None, bc.body(blocks[0]).unwrap());
5384		assert_eq!(None, bc.body(blocks[1]).unwrap()); // Has justification but no predicate
5385		assert_eq!(None, bc.body(blocks[2]).unwrap());
5386
5387		// Blocks 3 and 4 are within the pruning window
5388		assert!(bc.body(blocks[3]).unwrap().is_some());
5389		assert!(bc.body(blocks[4]).unwrap().is_some());
5390	}
5391
5392	#[test]
5393	fn prune_blocks_multiple_filters_or_logic() {
5394		// Test that multiple filters use OR logic: if ANY filter matches, block is kept
5395		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
5396			BlocksPruning::Some(2),
5397			0,
5398			vec![
5399				Arc::new(|j: &Justifications| j.get(CONS0_ENGINE_ID).is_some()),
5400				Arc::new(|j: &Justifications| j.get(CONS1_ENGINE_ID).is_some()),
5401			],
5402		);
5403
5404		let mut blocks = Vec::new();
5405		let mut prev_hash = Default::default();
5406
5407		// Create 7 blocks
5408		for i in 0..7 {
5409			let hash = insert_block(
5410				&backend,
5411				i,
5412				prev_hash,
5413				None,
5414				Default::default(),
5415				vec![UncheckedXt::new_transaction(i.into(), ())],
5416				None,
5417			)
5418			.unwrap();
5419			blocks.push(hash);
5420			prev_hash = hash;
5421		}
5422
5423		let cons0_justification = (CONS0_ENGINE_ID, vec![1, 2, 3]);
5424		let cons1_justification = (CONS1_ENGINE_ID, vec![4, 5, 6]);
5425
5426		// Finalize blocks with different justification patterns
5427		{
5428			let mut op = backend.begin_operation().unwrap();
5429			backend.begin_state_operation(&mut op, blocks[6]).unwrap();
5430			// Block 1: CONS0 only - should be preserved
5431			op.mark_finalized(blocks[1], Some(cons0_justification.clone())).unwrap();
5432			// Block 2: CONS1 only - should be preserved
5433			op.mark_finalized(blocks[2], Some(cons1_justification.clone())).unwrap();
5434			// Block 3: No justification - should be pruned
5435			op.mark_finalized(blocks[3], None).unwrap();
5436			// Block 4: Random/unknown engine ID - should be pruned
5437			op.mark_finalized(blocks[4], Some(([9, 9, 9, 9], vec![7, 8, 9]))).unwrap();
5438			op.mark_finalized(blocks[5], None).unwrap();
5439			op.mark_finalized(blocks[6], None).unwrap();
5440			backend.commit_operation(op).unwrap();
5441		}
5442
5443		let bc = backend.blockchain();
5444
5445		// Block 0 should be pruned (outside window, no justification)
5446		assert_eq!(None, bc.body(blocks[0]).unwrap());
5447
5448		// Block 1 should be preserved (has CONS0 justification)
5449		assert!(bc.body(blocks[1]).unwrap().is_some());
5450
5451		// Block 2 should be preserved (has CONS1 justification)
5452		assert!(bc.body(blocks[2]).unwrap().is_some());
5453
5454		// Block 3 should be pruned (no justification)
5455		assert_eq!(None, bc.body(blocks[3]).unwrap());
5456
5457		// Block 4 should be pruned (unknown engine ID)
5458		assert_eq!(None, bc.body(blocks[4]).unwrap());
5459
5460		// Blocks 5 and 6 are within the pruning window
5461		assert!(bc.body(blocks[5]).unwrap().is_some());
5462		assert!(bc.body(blocks[6]).unwrap().is_some());
5463	}
5464
5465	#[test]
5466	fn prune_blocks_filter_only_matches_specific_engine() {
5467		// Test that a filter for one engine ID does NOT preserve blocks with a different engine ID
5468		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
5469			BlocksPruning::Some(2),
5470			0,
5471			vec![Arc::new(|j: &Justifications| j.get(CONS0_ENGINE_ID).is_some())],
5472		);
5473
5474		let mut blocks = Vec::new();
5475		let mut prev_hash = Default::default();
5476
5477		// Create 5 blocks
5478		for i in 0..5 {
5479			let hash = insert_block(
5480				&backend,
5481				i,
5482				prev_hash,
5483				None,
5484				Default::default(),
5485				vec![UncheckedXt::new_transaction(i.into(), ())],
5486				None,
5487			)
5488			.unwrap();
5489			blocks.push(hash);
5490			prev_hash = hash;
5491		}
5492
5493		let cons1_justification = (CONS1_ENGINE_ID, vec![4, 5, 6]);
5494
5495		// Finalize blocks, adding CONS1 justification to block 1
5496		{
5497			let mut op = backend.begin_operation().unwrap();
5498			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
5499			// Block 1 gets CONS1 justification - should NOT be preserved by CONS0 filter
5500			op.mark_finalized(blocks[1], Some(cons1_justification.clone())).unwrap();
5501			op.mark_finalized(blocks[2], None).unwrap();
5502			op.mark_finalized(blocks[3], None).unwrap();
5503			op.mark_finalized(blocks[4], None).unwrap();
5504			backend.commit_operation(op).unwrap();
5505		}
5506
5507		let bc = backend.blockchain();
5508
5509		// Block 0 should be pruned
5510		assert_eq!(None, bc.body(blocks[0]).unwrap());
5511
5512		// Block 1 should also be pruned (CONS1 justification, but only CONS0 filter)
5513		assert_eq!(None, bc.body(blocks[1]).unwrap());
5514
5515		// Block 2 should be pruned
5516		assert_eq!(None, bc.body(blocks[2]).unwrap());
5517
5518		// Blocks 3 and 4 are within the pruning window
5519		assert!(bc.body(blocks[3]).unwrap().is_some());
5520		assert!(bc.body(blocks[4]).unwrap().is_some());
5521	}
5522
5523	/// Insert a header without body as best block. This triggers `MissingBody` gap creation
5524	/// when the parent header exists and `create_gap` is true.
5525	fn insert_header_no_body_as_best(
5526		backend: &Backend<Block>,
5527		number: u64,
5528		parent_hash: H256,
5529	) -> H256 {
5530		use sp_runtime::testing::Digest;
5531
5532		let digest = Digest::default();
5533		let header = Header {
5534			number,
5535			parent_hash,
5536			state_root: Default::default(),
5537			digest,
5538			extrinsics_root: Default::default(),
5539		};
5540
5541		let mut op = backend.begin_operation().unwrap();
5542		// body = None triggers MissingBody gap when parent exists
5543		op.set_block_data(header.clone(), None, None, None, NewBlockState::Best, true)
5544			.unwrap();
5545		backend.commit_operation(op).unwrap();
5546
5547		header.hash()
5548	}
5549
5550	/// Re-open a backend from an existing database with the given blocks pruning mode.
5551	fn reopen_backend(
5552		db: Arc<dyn sp_database::Database<DbHash>>,
5553		blocks_pruning: BlocksPruning,
5554	) -> Backend<Block> {
5555		let state_pruning = match blocks_pruning {
5556			BlocksPruning::KeepAll => PruningMode::ArchiveAll,
5557			BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical,
5558			BlocksPruning::Some(n) => PruningMode::blocks_pruning(n),
5559		};
5560		Backend::<Block>::new(
5561			DatabaseSettings {
5562				trie_cache_maximum_size: Some(16 * 1024 * 1024),
5563				state_pruning: Some(state_pruning),
5564				source: DatabaseSource::Custom { db, require_create_flag: false },
5565				blocks_pruning,
5566				pruning_filters: Default::default(),
5567				metrics_registry: None,
5568			},
5569			0,
5570		)
5571		.unwrap()
5572	}
5573
5574	#[test]
5575	fn missing_body_gap_is_removed_for_non_archive_node() {
5576		// Create a non-archive backend and produce a multi-block MissingBody gap.
5577		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(100), 0);
5578		assert!(!backend.is_archive);
5579
5580		let genesis_hash = insert_header(&backend, 0, Default::default(), None, Default::default());
5581
5582		// Insert blocks 1..3 without bodies — creates a MissingBody gap spanning blocks 1 to 3.
5583		let hash_1 = insert_header_no_body_as_best(&backend, 1, genesis_hash);
5584		let hash_2 = insert_header_no_body_as_best(&backend, 2, hash_1);
5585		insert_header_no_body_as_best(&backend, 3, hash_2);
5586
5587		let info = backend.blockchain().info();
5588		assert!(info.block_gap.is_some(), "MissingBody gap should have been created");
5589		let gap = info.block_gap.unwrap();
5590		assert!(matches!(gap.gap_type, BlockGapType::MissingBody));
5591		assert_eq!(gap.start, 1);
5592		assert_eq!(gap.end, 3);
5593
5594		// Re-open the same database as a non-archive node.
5595		let db = backend.storage.db.clone();
5596		let backend = reopen_backend(db, BlocksPruning::Some(100));
5597		assert!(!backend.is_archive);
5598
5599		// The multi-block gap should have been removed on re-open.
5600		let info = backend.blockchain().info();
5601		assert!(
5602			info.block_gap.is_none(),
5603			"MissingBody gap should be removed for non-archive nodes, got: {:?}",
5604			info.block_gap,
5605		);
5606	}
5607
5608	#[test]
5609	fn missing_body_gap_is_preserved_for_archive_node() {
5610		// Create a backend with archive pruning and produce a multi-block MissingBody gap.
5611		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::KeepAll, 0);
5612		assert!(backend.is_archive);
5613
5614		let genesis_hash = insert_header(&backend, 0, Default::default(), None, Default::default());
5615
5616		// Insert blocks 1..3 without bodies — creates a MissingBody gap spanning blocks 1 to 3.
5617		let hash_1 = insert_header_no_body_as_best(&backend, 1, genesis_hash);
5618		let hash_2 = insert_header_no_body_as_best(&backend, 2, hash_1);
5619		insert_header_no_body_as_best(&backend, 3, hash_2);
5620
5621		let info = backend.blockchain().info();
5622		assert!(info.block_gap.is_some(), "MissingBody gap should have been created");
5623		let gap = info.block_gap.unwrap();
5624		assert!(matches!(gap.gap_type, BlockGapType::MissingBody));
5625		assert_eq!(gap.start, 1);
5626		assert_eq!(gap.end, 3);
5627
5628		// Re-open the same database as an archive node.
5629		let db = backend.storage.db.clone();
5630		let backend = reopen_backend(db, BlocksPruning::KeepAll);
5631		assert!(backend.is_archive);
5632
5633		// The gap should be preserved for archive nodes.
5634		let info = backend.blockchain().info();
5635		assert!(info.block_gap.is_some(), "MissingBody gap should be preserved for archive nodes",);
5636		let gap = info.block_gap.unwrap();
5637		assert!(matches!(gap.gap_type, BlockGapType::MissingBody));
5638		assert_eq!(gap.start, 1);
5639		assert_eq!(gap.end, 3);
5640	}
5641
5642	#[test]
5643	fn missing_header_and_body_gap_is_preserved_for_non_archive_node() {
5644		// Create a non-archive backend and produce a MissingHeaderAndBody gap (from warp sync).
5645		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(100), 0);
5646		assert!(!backend.is_archive);
5647
5648		let _genesis_hash =
5649			insert_header(&backend, 0, Default::default(), None, Default::default());
5650
5651		// Insert a disconnected block at height 3 with a fake parent to create a
5652		// MissingHeaderAndBody gap (blocks 1..2 are missing).
5653		insert_disconnected_header(&backend, 3, H256::from([200; 32]), Default::default(), true);
5654
5655		let info = backend.blockchain().info();
5656		assert!(info.block_gap.is_some(), "Gap should have been created");
5657		let gap = info.block_gap.unwrap();
5658		assert!(matches!(gap.gap_type, BlockGapType::MissingHeaderAndBody));
5659		assert_eq!(gap.start, 1);
5660		assert_eq!(gap.end, 2);
5661
5662		// Re-open the same database as a non-archive node.
5663		let db = backend.storage.db.clone();
5664		let backend = reopen_backend(db, BlocksPruning::Some(100));
5665		assert!(!backend.is_archive);
5666
5667		// The MissingHeaderAndBody gap should NOT be removed — only MissingBody gaps are removed.
5668		let info = backend.blockchain().info();
5669		assert!(
5670			info.block_gap.is_some(),
5671			"MissingHeaderAndBody gap should be preserved for non-archive nodes",
5672		);
5673		let gap = info.block_gap.unwrap();
5674		assert!(matches!(gap.gap_type, BlockGapType::MissingHeaderAndBody));
5675		assert_eq!(gap.start, 1);
5676		assert_eq!(gap.end, 2);
5677	}
5678}