// sc_client_db/lib.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Client backend that is backed by a database.
//!
//! # Canonicality vs. Finality
//!
//! Finality indicates that a block will not be reverted, according to the consensus algorithm,
//! while canonicality indicates that the block may be reverted, but we will be unable to do so,
//! having discarded heavy state that will allow a chain reorganization.
//!
//! Finality implies canonicality but not vice-versa.
29#![warn(missing_docs)]
30
31pub mod offchain;
32
33pub mod bench;
34
35mod children;
36mod parity_db;
37mod pinned_blocks_cache;
38mod record_stats_state;
39mod stats;
40#[cfg(any(feature = "rocksdb", test))]
41mod upgrade;
42mod utils;
43
44use linked_hash_map::LinkedHashMap;
45use log::{debug, trace, warn};
46use parking_lot::{Mutex, RwLock};
47use prometheus_endpoint::Registry;
48use std::{
49	collections::{HashMap, HashSet},
50	io,
51	path::{Path, PathBuf},
52	sync::Arc,
53};
54
55use crate::{
56	pinned_blocks_cache::PinnedBlocksCache,
57	record_stats_state::RecordStatsState,
58	stats::StateUsageStats,
59	utils::{meta_keys, read_db, read_meta, remove_from_db, DatabaseType, Meta},
60};
61use codec::{Decode, Encode};
62use hash_db::Prefix;
63use sc_client_api::{
64	backend::NewBlockState,
65	blockchain::{BlockGap, BlockGapType},
66	leaves::{FinalizationOutcome, LeafSet},
67	utils::is_descendent_of,
68	IoInfo, MemoryInfo, MemorySize, TrieCacheContext, UsageInfo,
69};
70use sc_state_db::{IsPruned, LastCanonicalized, StateDb};
71use sp_arithmetic::traits::Saturating;
72use sp_blockchain::{
73	Backend as _, CachedHeaderMetadata, DisplacedLeavesAfterFinalization, Error as ClientError,
74	HeaderBackend, HeaderMetadata, HeaderMetadataCache, Result as ClientResult,
75};
76use sp_core::{
77	offchain::OffchainOverlayedChange,
78	storage::{well_known_keys, ChildInfo},
79};
80use sp_database::Transaction;
81use sp_runtime::{
82	generic::BlockId,
83	traits::{
84		Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One, SaturatedConversion,
85		Zero,
86	},
87	Justification, Justifications, StateVersion, Storage,
88};
89use sp_state_machine::{
90	backend::{AsTrieBackend, Backend as StateBackend},
91	BackendTransaction, ChildStorageCollection, DBValue, IndexOperation, IterArgs,
92	OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, StorageKey,
93	StorageValue, UsageInfo as StateUsageInfo,
94};
95use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB};
96use utils::BLOCK_GAP_CURRENT_VERSION;
97
98// Re-export the Database trait so that one can pass an implementation of it.
99pub use sc_state_db::PruningMode;
100pub use sp_database::Database;
101
102pub use bench::BenchmarkingState;
103
/// Filter to determine if a block should be excluded from pruning.
///
/// Note: This filter only affects **block body** (and future header) pruning.
/// It does **not** affect state pruning, which is configured separately.
///
/// A blanket implementation exists for any `Fn(&Justifications) -> bool`
/// closure that is `Send + Sync`, so plain closures can be used directly.
pub trait PruningFilter: Send + Sync {
	/// Check if a block with the given justifications should be preserved.
	///
	/// Returns `true` to preserve the block, `false` to allow pruning.
	fn should_retain(&self, justifications: &Justifications) -> bool;
}
114
115impl<F> PruningFilter for F
116where
117	F: Fn(&Justifications) -> bool + Send + Sync,
118{
119	fn should_retain(&self, justifications: &Justifications) -> bool {
120		(self)(justifications)
121	}
122}
123
/// Maximum number of entries kept in `BlockchainDb::header_cache`;
/// `cache_header` evicts the oldest entries beyond this bound.
const CACHE_HEADERS: usize = 8;
125
/// DB-backed patricia trie state, transaction type is an overlay of changes to commit.
pub type DbState<H> = sp_state_machine::TrieBackend<Arc<dyn sp_state_machine::Storage<H>>, H>;

/// Builder for [`DbState`].
pub type DbStateBuilder<Hasher> =
	sp_state_machine::TrieBackendBuilder<Arc<dyn sp_state_machine::Storage<Hasher>>, Hasher>;

/// Length of a [`DbHash`] in bytes (matches the 256-bit `H256`).
const DB_HASH_LEN: usize = 32;

/// Hash type that this backend uses for the database.
pub type DbHash = sp_core::H256;
138
/// An extrinsic entry in the database.
///
/// NOTE: this is SCALE-encoded on disk (`Encode`/`Decode` derives), so the
/// variant order and field order are part of the storage format — do not
/// reorder them.
#[derive(Debug, Encode, Decode)]
enum DbExtrinsic<B: BlockT> {
	/// Extrinsic that contains indexed data.
	Indexed {
		/// Hash of the indexed part.
		hash: DbHash,
		/// Extrinsic header.
		header: Vec<u8>,
	},
	/// Complete extrinsic data.
	Full(B::Extrinsic),
	/// Extrinsic that renews multiple indexed data items within a single call.
	///
	/// `hashes` is in submission order: the proof-of-storage inherent provider
	/// walks `block_indexed_body` linearly and the runtime indexes a parallel
	/// `Vec<TransactionInfo>` by the same position, so reordering here would
	/// desync proof construction from verification.
	MultiRenew {
		/// Submission order; see variant docs.
		hashes: Vec<DbHash>,
		/// The full encoded extrinsic; unlike `Indexed`, no separate payload
		/// needs to be joined back in when decoding (see `body_uncached`).
		extrinsic: Vec<u8>,
	},
}
163
/// A reference tracking state.
///
/// It makes sure that the hash we are using stays pinned in storage
/// until this structure is dropped.
pub struct RefTrackingState<Block: BlockT> {
	/// The wrapped trie-backed state; all queries delegate to it.
	state: DbState<HashingFor<Block>>,
	/// Storage backend; its `state_db` holds the pin released on drop.
	storage: Arc<StorageDb<Block>>,
	/// Hash pinned on behalf of this state; unpinned in `Drop` when `Some`.
	parent_hash: Option<Block::Hash>,
}
173
174impl<B: BlockT> RefTrackingState<B> {
175	fn new(
176		state: DbState<HashingFor<B>>,
177		storage: Arc<StorageDb<B>>,
178		parent_hash: Option<B::Hash>,
179	) -> Self {
180		RefTrackingState { state, parent_hash, storage }
181	}
182}
183
184impl<B: BlockT> Drop for RefTrackingState<B> {
185	fn drop(&mut self) {
186		if let Some(hash) = &self.parent_hash {
187			self.storage.state_db.unpin(hash);
188		}
189	}
190}
191
192impl<Block: BlockT> std::fmt::Debug for RefTrackingState<Block> {
193	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
194		write!(f, "Block {:?}", self.parent_hash)
195	}
196}
197
/// A raw iterator over the `RefTrackingState`.
pub struct RawIter<B: BlockT> {
	/// The raw iterator of the wrapped `DbState`, re-exposed with
	/// `RefTrackingState` as the backend type.
	inner: <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::RawIter,
}
202
// Pure delegation: each call forwards to the inner `DbState` iterator,
// passing the wrapped state of the `RefTrackingState` backend.
impl<B: BlockT> StorageIterator<HashingFor<B>> for RawIter<B> {
	type Backend = RefTrackingState<B>;
	type Error = <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::Error;

	fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
		self.inner.next_key(&backend.state)
	}

	fn next_pair(
		&mut self,
		backend: &Self::Backend,
	) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
		self.inner.next_pair(&backend.state)
	}

	fn was_complete(&self) -> bool {
		self.inner.was_complete()
	}
}
222
// `StateBackend` for `RefTrackingState`: every method is a straight
// delegation to the wrapped `DbState`. Only `raw_iter` does extra work,
// re-wrapping the returned iterator so its `Backend` type is
// `RefTrackingState` (see `RawIter`).
impl<B: BlockT> StateBackend<HashingFor<B>> for RefTrackingState<B> {
	type Error = <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::Error;
	type TrieBackendStorage =
		<DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
	type RawIter = RawIter<B>;

	fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.storage(key)
	}

	fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
		self.state.storage_hash(key)
	}

	fn child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.child_storage(child_info, key)
	}

	fn child_storage_hash(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<B::Hash>, Self::Error> {
		self.state.child_storage_hash(child_info, key)
	}

	fn closest_merkle_value(
		&self,
		key: &[u8],
	) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
		self.state.closest_merkle_value(key)
	}

	fn child_closest_merkle_value(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
		self.state.child_closest_merkle_value(child_info, key)
	}

	fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
		self.state.exists_storage(key)
	}

	fn exists_child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<bool, Self::Error> {
		self.state.exists_child_storage(child_info, key)
	}

	fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_storage_key(key)
	}

	fn next_child_storage_key(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_child_storage_key(child_info, key)
	}

	fn storage_root<'a>(
		&self,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
		state_version: StateVersion,
	) -> (B::Hash, BackendTransaction<HashingFor<B>>) {
		self.state.storage_root(delta, state_version)
	}

	fn child_storage_root<'a>(
		&self,
		child_info: &ChildInfo,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
		state_version: StateVersion,
	) -> (B::Hash, bool, BackendTransaction<HashingFor<B>>) {
		self.state.child_storage_root(child_info, delta, state_version)
	}

	fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
		// Wrap the inner iterator so callers iterate against `RefTrackingState`.
		self.state.raw_iter(args).map(|inner| RawIter { inner })
	}

	fn register_overlay_stats(&self, stats: &StateMachineStats) {
		self.state.register_overlay_stats(stats);
	}

	fn usage_info(&self) -> StateUsageInfo {
		self.state.usage_info()
	}
}
321
322impl<B: BlockT> AsTrieBackend<HashingFor<B>> for RefTrackingState<B> {
323	type TrieBackendStorage =
324		<DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
325
326	fn as_trie_backend(
327		&self,
328	) -> &sp_state_machine::TrieBackend<Self::TrieBackendStorage, HashingFor<B>> {
329		&self.state.as_trie_backend()
330	}
331}
332
/// Database settings.
pub struct DatabaseSettings {
	/// The maximum trie cache size in bytes.
	///
	/// If `None` is given, the cache is disabled.
	pub trie_cache_maximum_size: Option<usize>,
	/// Requested state pruning mode.
	pub state_pruning: Option<PruningMode>,
	/// Where to find the database.
	pub source: DatabaseSource,
	/// Block pruning mode.
	///
	/// NOTE: only finalized blocks are subject for removal!
	pub blocks_pruning: BlocksPruning,
	/// Filters to exclude blocks from pruning.
	///
	/// If any filter returns `true` for a block's justifications, the block body
	/// (and in the future, the header) will be preserved even when it falls
	/// outside the pruning window. Does not affect state pruning.
	/// See [`PruningFilter`].
	pub pruning_filters: Vec<Arc<dyn PruningFilter>>,
	/// Prometheus metrics registry.
	pub metrics_registry: Option<Registry>,
}
356
/// Block pruning settings.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BlocksPruning {
	/// Keep full block history, of every block that was ever imported.
	KeepAll,
	/// Keep full finalized block history.
	KeepFinalized,
	/// Keep N recent finalized blocks.
	Some(u32),
}

impl BlocksPruning {
	/// True if this is an archive pruning mode (either KeepAll or KeepFinalized).
	pub fn is_archive(&self) -> bool {
		// Only the bounded `Some(_)` mode is non-archive.
		matches!(self, Self::KeepAll | Self::KeepFinalized)
	}
}
377
/// Where to find the database.
#[derive(Debug, Clone)]
pub enum DatabaseSource {
	/// Check given path, and see if there is an existing database there. If it's either `RocksDb`
	/// or `ParityDb`, use it. If there is none, create a new instance of `ParityDb`.
	Auto {
		/// Path to the paritydb database.
		paritydb_path: PathBuf,
		/// Path to the rocksdb database.
		rocksdb_path: PathBuf,
		/// Cache size in MiB. Used only by `RocksDb` variant of `DatabaseSource`.
		cache_size: usize,
	},
	/// Load a RocksDB database from a given path. Recommended for most uses.
	#[cfg(feature = "rocksdb")]
	RocksDb {
		/// Path to the database.
		path: PathBuf,
		/// Cache size in MiB.
		cache_size: usize,
	},

	/// Load a ParityDb database from a given path.
	ParityDb {
		/// Path to the database.
		path: PathBuf,
	},

	/// Use a custom already-open database.
	Custom {
		/// the handle to the custom storage
		db: Arc<dyn Database<DbHash>>,

		/// if set, the `create` flag will be required to open such datasource
		require_create_flag: bool,
	},
}
415
416impl DatabaseSource {
417	/// Return path for databases that are stored on disk.
418	pub fn path(&self) -> Option<&Path> {
419		match self {
420			// as per https://github.com/paritytech/substrate/pull/9500#discussion_r684312550
421			//
422			// IIUC this is needed for polkadot to create its own dbs, so until it can use parity db
423			// I would think rocksdb, but later parity-db.
424			DatabaseSource::Auto { paritydb_path, .. } => Some(paritydb_path),
425			#[cfg(feature = "rocksdb")]
426			DatabaseSource::RocksDb { path, .. } => Some(path),
427			DatabaseSource::ParityDb { path } => Some(path),
428			DatabaseSource::Custom { .. } => None,
429		}
430	}
431
432	/// Set path for databases that are stored on disk.
433	pub fn set_path(&mut self, p: &Path) -> bool {
434		match self {
435			DatabaseSource::Auto { ref mut paritydb_path, .. } => {
436				*paritydb_path = p.into();
437				true
438			},
439			#[cfg(feature = "rocksdb")]
440			DatabaseSource::RocksDb { ref mut path, .. } => {
441				*path = p.into();
442				true
443			},
444			DatabaseSource::ParityDb { ref mut path } => {
445				*path = p.into();
446				true
447			},
448			DatabaseSource::Custom { .. } => false,
449		}
450	}
451}
452
453impl std::fmt::Display for DatabaseSource {
454	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
455		let name = match self {
456			DatabaseSource::Auto { .. } => "Auto",
457			#[cfg(feature = "rocksdb")]
458			DatabaseSource::RocksDb { .. } => "RocksDb",
459			DatabaseSource::ParityDb { .. } => "ParityDb",
460			DatabaseSource::Custom { .. } => "Custom",
461		};
462		write!(f, "{}", name)
463	}
464}
465
/// Database column ids used by this backend.
pub(crate) mod columns {
	/// Chain metadata (leaves, children, meta keys).
	pub const META: u32 = crate::utils::COLUMN_META;
	/// State data.
	pub const STATE: u32 = 1;
	/// Metadata for `sc_state_db` (see `StateMetaDb`).
	pub const STATE_META: u32 = 2;
	/// maps hashes to lookup keys and numbers to canon hashes.
	pub const KEY_LOOKUP: u32 = 3;
	/// Block headers.
	pub const HEADER: u32 = 4;
	/// Block bodies.
	pub const BODY: u32 = 5;
	/// Block justifications.
	pub const JUSTIFICATIONS: u32 = 6;
	// NOTE(review): ids 7 and 10 are skipped — presumably retired columns;
	// confirm against the `upgrade` module before reusing them.
	/// Auxiliary client data (see `apply_aux`).
	pub const AUX: u32 = 8;
	/// Offchain workers local storage
	pub const OFFCHAIN: u32 = 9;
	/// Transactions
	pub const TRANSACTION: u32 = 11;
	/// Per-block index of `DbExtrinsic` entries (see `body_uncached`).
	pub const BODY_INDEX: u32 = 12;
}
482
/// A block (plus accompanying data) queued for insertion by a
/// [`BlockImportOperation`]; populated via `set_block_data`.
struct PendingBlock<Block: BlockT> {
	header: Block::Header,
	justifications: Option<Justifications>,
	body: Option<Vec<Block::Extrinsic>>,
	// Indexed transaction payloads accompanying the body, if any.
	indexed_body: Option<Vec<Vec<u8>>>,
	// Best/final status the block enters the chain with (see `NewBlockState`).
	leaf_state: NewBlockState,
	// Presumably controls insertion into the leaf set — confirm against
	// the commit path (not visible in this chunk).
	register_as_leaf: bool,
}
491
// Wrapper that implements the `MetaDb` trait required by `sc_state_db`,
// reading from the `STATE_META` column of the underlying database.
#[derive(Clone)]
struct StateMetaDb(Arc<dyn Database<DbHash>>);

impl sc_state_db::MetaDb for StateMetaDb {
	type Error = sp_database::error::DatabaseError;

	/// Read a state-db metadata value; `None` if the key is absent.
	fn get_meta(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		Ok(self.0.get(columns::STATE_META, key))
	}
}
503
/// A pending update to the in-memory chain metadata; applied by
/// `BlockchainDb::update_meta`.
struct MetaUpdate<Block: BlockT> {
	pub hash: Block::Hash,
	pub number: NumberFor<Block>,
	// When set, `hash`/`number` become the best block.
	pub is_best: bool,
	// When set, `hash`/`number` become the finalized block.
	pub is_finalized: bool,
	// Only meaningful together with `is_finalized`: also record
	// `(hash, number)` as the finalized state.
	pub with_state: bool,
}
511
/// Insert `header` (which may be `None`, caching a known miss) into the
/// header cache, then evict the oldest entries until at most
/// `CACHE_HEADERS` remain.
fn cache_header<Hash: std::cmp::Eq + std::hash::Hash, Header>(
	cache: &mut LinkedHashMap<Hash, Option<Header>>,
	hash: Hash,
	header: Option<Header>,
) {
	cache.insert(hash, header);
	while cache.len() > CACHE_HEADERS {
		// Pop from the front: the least recently inserted/refreshed entry.
		cache.pop_front();
	}
}
522
/// Block database
pub struct BlockchainDb<Block: BlockT> {
	// Handle to the underlying key-value database.
	db: Arc<dyn Database<DbHash>>,
	// In-memory mirror of chain metadata (best/finalized/genesis, block gap).
	meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
	// Set of chain leaves, persisted under `meta_keys::LEAF_PREFIX`.
	leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
	// Cache of header metadata keyed by block hash.
	header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
	// Bounded cache of recently accessed headers (see `cache_header`).
	header_cache: Mutex<LinkedHashMap<Block::Hash, Option<Block::Header>>>,
	// Bodies/justifications of pinned blocks, kept available after pruning.
	pinned_blocks_cache: Arc<RwLock<PinnedBlocksCache<Block>>>,
}
532
impl<Block: BlockT> BlockchainDb<Block> {
	/// Open the blockchain database, reading persisted metadata and the
	/// leaf set from `db`.
	fn new(db: Arc<dyn Database<DbHash>>) -> ClientResult<Self> {
		let meta = read_meta::<Block>(&*db, columns::HEADER)?;
		let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
		Ok(BlockchainDb {
			db,
			leaves: RwLock::new(leaves),
			meta: Arc::new(RwLock::new(meta)),
			header_metadata_cache: Arc::new(HeaderMetadataCache::default()),
			header_cache: Default::default(),
			pinned_blocks_cache: Arc::new(RwLock::new(PinnedBlocksCache::new())),
		})
	}

	/// Apply a [`MetaUpdate`] to the in-memory metadata under a single
	/// write lock.
	fn update_meta(&self, update: MetaUpdate<Block>) {
		let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update;
		let mut meta = self.meta.write();
		// Block zero is by definition the genesis block.
		if number.is_zero() {
			meta.genesis_hash = hash;
		}

		if is_best {
			meta.best_number = number;
			meta.best_hash = hash;
		}

		if is_finalized {
			if with_state {
				meta.finalized_state = Some((hash, number));
			}
			meta.finalized_number = number;
			meta.finalized_hash = hash;
		}
	}

	/// Replace the recorded block gap (`None` clears it).
	fn update_block_gap(&self, gap: Option<BlockGap<NumberFor<Block>>>) {
		let mut meta = self.meta.write();
		meta.block_gap = gap;
	}

	/// Empty the cache of pinned items.
	fn clear_pinning_cache(&self) {
		self.pinned_blocks_cache.write().clear();
	}

	/// Load a justification into the cache of pinned items.
	/// Reference count of the item will not be increased. Use this
	/// to load values for items into the cache which have already been pinned.
	fn insert_justifications_if_pinned(&self, hash: Block::Hash, justification: Justification) {
		let mut cache = self.pinned_blocks_cache.write();
		if !cache.contains(hash) {
			return;
		}

		let justifications = Justifications::from(justification);
		cache.insert_justifications(hash, Some(justifications));
	}

	/// Load a justification from the db into the cache of pinned items.
	/// Reference count of the item will not be increased. Use this
	/// to load values for items into the cache which have already been pinned.
	fn insert_persisted_justifications_if_pinned(&self, hash: Block::Hash) -> ClientResult<()> {
		let mut cache = self.pinned_blocks_cache.write();
		if !cache.contains(hash) {
			return Ok(());
		}

		let justifications = self.justifications_uncached(hash)?;
		cache.insert_justifications(hash, justifications);
		Ok(())
	}

	/// Load a block body from the db into the cache of pinned items.
	/// Reference count of the item will not be increased. Use this
	/// to load values for items into the cache which have already been pinned.
	fn insert_persisted_body_if_pinned(&self, hash: Block::Hash) -> ClientResult<()> {
		let mut cache = self.pinned_blocks_cache.write();
		if !cache.contains(hash) {
			return Ok(());
		}

		let body = self.body_uncached(hash)?;
		cache.insert_body(hash, body);
		Ok(())
	}

	/// Bump reference count for pinned item.
	fn bump_ref(&self, hash: Block::Hash) {
		self.pinned_blocks_cache.write().pin(hash);
	}

	/// Decrease reference count for pinned item and remove if reference count is 0.
	fn unpin(&self, hash: Block::Hash) {
		self.pinned_blocks_cache.write().unpin(hash);
	}

	/// Read and decode the justifications for `hash` straight from the db,
	/// bypassing the pinned-blocks cache.
	fn justifications_uncached(&self, hash: Block::Hash) -> ClientResult<Option<Justifications>> {
		match read_db(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::JUSTIFICATIONS,
			BlockId::<Block>::Hash(hash),
		)? {
			Some(justifications) => match Decode::decode(&mut &justifications[..]) {
				Ok(justifications) => Ok(Some(justifications)),
				Err(err) => {
					return Err(sp_blockchain::Error::Backend(format!(
						"Error decoding justifications: {err}"
					)))
				},
			},
			None => Ok(None),
		}
	}

	/// Read and decode the block body for `hash` straight from the db,
	/// bypassing the pinned-blocks cache.
	///
	/// A body is stored either plainly (`BODY` column) or as a list of
	/// [`DbExtrinsic`] entries (`BODY_INDEX` column) whose indexed payloads
	/// live in the `TRANSACTION` column and are joined back in here.
	fn body_uncached(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
		if let Some(body) =
			read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::<Block>(hash))?
		{
			// Plain body
			match Decode::decode(&mut &body[..]) {
				Ok(body) => return Ok(Some(body)),
				Err(err) => {
					return Err(sp_blockchain::Error::Backend(format!(
						"Error decoding body: {err}"
					)))
				},
			}
		}

		if let Some(index) = read_db(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::BODY_INDEX,
			BlockId::Hash::<Block>(hash),
		)? {
			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
				Ok(index) => {
					let mut body = Vec::new();
					for ex in index {
						match ex {
							DbExtrinsic::Indexed { hash, header } => {
								// Re-assemble the extrinsic by concatenating its
								// stored header with the indexed payload.
								match self.db.get(columns::TRANSACTION, hash.as_ref()) {
									Some(t) => {
										let mut input =
											utils::join_input(header.as_ref(), t.as_ref());
										let ex = Block::Extrinsic::decode(&mut input).map_err(
											|err| {
												sp_blockchain::Error::Backend(format!(
													"Error decoding indexed extrinsic: {err}"
												))
											},
										)?;
										body.push(ex);
									},
									None => {
										return Err(sp_blockchain::Error::Backend(format!(
											"Missing indexed transaction {hash:?}"
										)))
									},
								};
							},
							DbExtrinsic::Full(ex) => {
								body.push(ex);
							},
							DbExtrinsic::MultiRenew { extrinsic, .. } => {
								// Multi-renewal extrinsic: the `extrinsic` field
								// holds the full encoded extrinsic (no indexed
								// data to join).
								let ex = Block::Extrinsic::decode(&mut &extrinsic[..]).map_err(
									|err| {
										sp_blockchain::Error::Backend(format!(
											"Error decoding multi-renew extrinsic: {err}"
										))
									},
								)?;
								body.push(ex);
							},
						}
					}
					return Ok(Some(body));
				},
				Err(err) => {
					return Err(sp_blockchain::Error::Backend(format!(
						"Error decoding body list: {err}",
					)))
				},
			}
		}
		Ok(None)
	}

	/// Iterate over the indexed-transaction hashes referenced by the body
	/// index of `hash`, in stored order (`MultiRenew` hashes are flattened
	/// in place to preserve submission order). `Ok(None)` if the block has
	/// no body index.
	fn block_indexed_hashes_iter(
		&self,
		hash: Block::Hash,
	) -> ClientResult<Option<impl Iterator<Item = DbHash>>> {
		let Some(body) = read_db(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::BODY_INDEX,
			BlockId::<Block>::Hash(hash),
		)?
		else {
			return Ok(None);
		};
		match Vec::<DbExtrinsic<Block>>::decode(&mut &body[..]) {
			Ok(index) => Ok(Some(index.into_iter().flat_map(|ex| match ex {
				DbExtrinsic::Indexed { hash, .. } => vec![hash],
				DbExtrinsic::MultiRenew { hashes, .. } => hashes.into_iter().collect(),
				_ => vec![],
			}))),
			Err(err) => {
				Err(sp_blockchain::Error::Backend(format!("Error decoding body list: {err}")))
			},
		}
	}
}
749
impl<Block: BlockT> sc_client_api::blockchain::HeaderBackend<Block> for BlockchainDb<Block> {
	fn header(&self, hash: Block::Hash) -> ClientResult<Option<Block::Header>> {
		let mut cache = self.header_cache.lock();
		// `get_refresh` moves the entry to the back of the map, so frequently
		// accessed headers survive the front-first eviction in `cache_header`.
		if let Some(result) = cache.get_refresh(&hash) {
			return Ok(result.clone());
		}
		let header = utils::read_header(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::HEADER,
			BlockId::<Block>::Hash(hash),
		)?;
		// Misses (`None`) are cached too, avoiding repeated db lookups.
		cache_header(&mut cache, hash, header.clone());
		Ok(header)
	}

	fn info(&self) -> sc_client_api::blockchain::Info<Block> {
		// One read lock gives a consistent snapshot of the metadata.
		let meta = self.meta.read();
		sc_client_api::blockchain::Info {
			best_hash: meta.best_hash,
			best_number: meta.best_number,
			genesis_hash: meta.genesis_hash,
			finalized_hash: meta.finalized_hash,
			finalized_number: meta.finalized_number,
			finalized_state: meta.finalized_state,
			number_leaves: self.leaves.read().count(),
			block_gap: meta.block_gap,
		}
	}

	fn status(&self, hash: Block::Hash) -> ClientResult<sc_client_api::blockchain::BlockStatus> {
		match self.header(hash)?.is_some() {
			true => Ok(sc_client_api::blockchain::BlockStatus::InChain),
			false => Ok(sc_client_api::blockchain::BlockStatus::Unknown),
		}
	}

	fn number(&self, hash: Block::Hash) -> ClientResult<Option<NumberFor<Block>>> {
		// Metadata lookup failures are reported as "unknown" (`None`), not errors.
		Ok(self.header_metadata(hash).ok().map(|header_metadata| header_metadata.number))
	}

	fn hash(&self, number: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
		Ok(utils::read_header::<Block>(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::HEADER,
			BlockId::Number(number),
		)?
		.map(|header| header.hash()))
	}
}
801
impl<Block: BlockT> sc_client_api::blockchain::Backend<Block> for BlockchainDb<Block> {
	fn body(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
		// Pinned blocks serve their body from the cache, even after pruning.
		let cache = self.pinned_blocks_cache.read();
		if let Some(result) = cache.body(&hash) {
			return Ok(result.clone());
		}

		self.body_uncached(hash)
	}

	fn justifications(&self, hash: Block::Hash) -> ClientResult<Option<Justifications>> {
		// Pinned blocks serve their justifications from the cache.
		let cache = self.pinned_blocks_cache.read();
		if let Some(result) = cache.justifications(&hash) {
			return Ok(result.clone());
		}

		self.justifications_uncached(hash)
	}

	fn last_finalized(&self) -> ClientResult<Block::Hash> {
		Ok(self.meta.read().finalized_hash)
	}

	fn leaves(&self) -> ClientResult<Vec<Block::Hash>> {
		Ok(self.leaves.read().hashes())
	}

	fn children(&self, parent_hash: Block::Hash) -> ClientResult<Vec<Block::Hash>> {
		children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash)
	}

	fn indexed_transaction(&self, hash: DbHash) -> ClientResult<Option<Vec<u8>>> {
		Ok(self.db.get(columns::TRANSACTION, hash.as_ref()))
	}

	fn has_indexed_transaction(&self, hash: DbHash) -> ClientResult<bool> {
		Ok(self.db.contains(columns::TRANSACTION, hash.as_ref()))
	}

	fn block_indexed_hashes(&self, hash: Block::Hash) -> ClientResult<Option<Vec<DbHash>>> {
		self.block_indexed_hashes_iter(hash).map(|hashes| hashes.map(Iterator::collect))
	}

	fn block_indexed_body(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Vec<u8>>>> {
		// Resolve every indexed hash to its transaction payload; a missing
		// payload is a hard error, since the body index claims it exists.
		match self.block_indexed_hashes_iter(hash) {
			Ok(Some(hashes)) => Ok(Some(
				hashes
					.map(|hash| match self.db.get(columns::TRANSACTION, hash.as_ref()) {
						Some(t) => Ok(t),
						None => Err(sp_blockchain::Error::Backend(format!(
							"Missing indexed transaction {hash:?}",
						))),
					})
					.collect::<Result<_, _>>()?,
			)),
			Ok(None) => Ok(None),
			Err(err) => Err(err),
		}
	}
}
862
impl<Block: BlockT> HeaderMetadata<Block> for BlockchainDb<Block> {
	type Error = sp_blockchain::Error;

	fn header_metadata(
		&self,
		hash: Block::Hash,
	) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
		// Fast path: the metadata cache. Slow path: read the header from the
		// database, derive the metadata and populate the cache.
		self.header_metadata_cache.header_metadata(hash).map_or_else(
			|| {
				self.header(hash)?
					.map(|header| {
						let header_metadata = CachedHeaderMetadata::from(&header);
						self.header_metadata_cache
							.insert_header_metadata(header_metadata.hash, header_metadata.clone());
						header_metadata
					})
					.ok_or_else(|| {
						ClientError::UnknownBlock(format!(
							"Header was not found in the database: {hash:?}",
						))
					})
			},
			Ok,
		)
	}

	fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
		self.header_metadata_cache.insert_header_metadata(hash, metadata)
	}

	fn remove_header_metadata(&self, hash: Block::Hash) {
		// Drop the plain header cache entry as well, so neither cache can
		// serve stale data for this hash.
		self.header_cache.lock().remove(&hash);
		self.header_metadata_cache.remove_header_metadata(hash);
	}
}
898
/// Database transaction
pub struct BlockImportOperation<Block: BlockT> {
	// State at the parent block; used to compute new storage roots
	// (see `apply_new_state`).
	old_state: RecordStatsState<RefTrackingState<Block>, Block>,
	// Accumulated trie-node changes for the state backend.
	db_updates: PrefixedMemoryDB<HashingFor<Block>>,
	storage_updates: StorageCollection,
	child_storage_updates: ChildStorageCollection,
	// Offchain changes; drained by `apply_offchain`.
	offchain_storage_updates: OffchainChangesCollection,
	// The block queued via `set_block_data`, if any.
	pending_block: Option<PendingBlock<Block>>,
	// Auxiliary key/value writes (`None` value = delete); drained by `apply_aux`.
	aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
	// Blocks to mark finalized, each with an optional justification.
	finalized_blocks: Vec<(Block::Hash, Option<Justification>)>,
	// New chain head to set, if requested.
	set_head: Option<Block::Hash>,
	commit_state: bool,
	create_gap: bool,
	// Set by `reset_storage`; signals a full storage replacement.
	reset_storage: bool,
	index_ops: Vec<IndexOperation>,
}
915
impl<Block: BlockT> BlockImportOperation<Block> {
	/// Write all pending offchain storage changes into `transaction`,
	/// draining `offchain_storage_updates`.
	fn apply_offchain(&mut self, transaction: &mut Transaction<DbHash>) {
		let mut count = 0;
		for ((prefix, key), value_operation) in self.offchain_storage_updates.drain(..) {
			count += 1;
			// Offchain entries are keyed by `prefix ++ key`.
			let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key);
			match value_operation {
				OffchainOverlayedChange::SetValue(val) => {
					transaction.set_from_vec(columns::OFFCHAIN, &key, val)
				},
				OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key),
			}
		}

		if count > 0 {
			log::debug!(target: "sc_offchain", "Applied {count} offchain indexing changes.");
		}
	}

	/// Write all pending auxiliary key/value operations into `transaction`,
	/// draining `aux_ops`. A `None` value deletes the key.
	fn apply_aux(&mut self, transaction: &mut Transaction<DbHash>) {
		for (key, maybe_val) in self.aux_ops.drain(..) {
			match maybe_val {
				Some(val) => transaction.set_from_vec(columns::AUX, &key, val),
				None => transaction.remove(columns::AUX, &key),
			}
		}
	}

	/// Replace the whole state with `storage`, computing the new storage
	/// root against `old_state`.
	///
	/// Fails with `InvalidState` if the top-level map contains child-storage
	/// keys. The resulting trie transaction is stashed in `db_updates` and
	/// the new root is returned.
	fn apply_new_state(
		&mut self,
		storage: Storage,
		state_version: StateVersion,
	) -> ClientResult<Block::Hash> {
		if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(k)) {
			return Err(sp_blockchain::Error::InvalidState);
		}

		// Insertion deltas for every default child trie.
		let child_delta = storage.children_default.values().map(|child_content| {
			(
				&child_content.child_info,
				child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
			)
		});

		let (root, transaction) = self.old_state.full_storage_root(
			storage.top.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
			child_delta,
			state_version,
		);

		self.db_updates = transaction;
		Ok(root)
	}
}
970
971impl<Block: BlockT> sc_client_api::backend::BlockImportOperation<Block>
972	for BlockImportOperation<Block>
973{
974	type State = RecordStatsState<RefTrackingState<Block>, Block>;
975
976	fn state(&self) -> ClientResult<Option<&Self::State>> {
977		Ok(Some(&self.old_state))
978	}
979
980	fn set_block_data(
981		&mut self,
982		header: Block::Header,
983		body: Option<Vec<Block::Extrinsic>>,
984		indexed_body: Option<Vec<Vec<u8>>>,
985		justifications: Option<Justifications>,
986		leaf_state: NewBlockState,
987		register_as_leaf: bool,
988	) -> ClientResult<()> {
989		assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
990		self.pending_block = Some(PendingBlock {
991			header,
992			body,
993			indexed_body,
994			justifications,
995			leaf_state,
996			register_as_leaf,
997		});
998		Ok(())
999	}
1000
1001	fn update_db_storage(
1002		&mut self,
1003		update: PrefixedMemoryDB<HashingFor<Block>>,
1004	) -> ClientResult<()> {
1005		self.db_updates = update;
1006		Ok(())
1007	}
1008
1009	fn reset_storage(
1010		&mut self,
1011		storage: Storage,
1012		state_version: StateVersion,
1013	) -> ClientResult<Block::Hash> {
1014		let root = self.apply_new_state(storage, state_version)?;
1015		self.commit_state = true;
1016		self.reset_storage = true;
1017		Ok(root)
1018	}
1019
1020	fn set_genesis_state(
1021		&mut self,
1022		storage: Storage,
1023		commit: bool,
1024		state_version: StateVersion,
1025	) -> ClientResult<Block::Hash> {
1026		let root = self.apply_new_state(storage, state_version)?;
1027		self.commit_state = commit;
1028		Ok(root)
1029	}
1030
1031	fn insert_aux<I>(&mut self, ops: I) -> ClientResult<()>
1032	where
1033		I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
1034	{
1035		self.aux_ops.append(&mut ops.into_iter().collect());
1036		Ok(())
1037	}
1038
1039	fn update_storage(
1040		&mut self,
1041		update: StorageCollection,
1042		child_update: ChildStorageCollection,
1043	) -> ClientResult<()> {
1044		self.storage_updates = update;
1045		self.child_storage_updates = child_update;
1046		Ok(())
1047	}
1048
1049	fn update_offchain_storage(
1050		&mut self,
1051		offchain_update: OffchainChangesCollection,
1052	) -> ClientResult<()> {
1053		self.offchain_storage_updates = offchain_update;
1054		Ok(())
1055	}
1056
1057	fn mark_finalized(
1058		&mut self,
1059		block: Block::Hash,
1060		justification: Option<Justification>,
1061	) -> ClientResult<()> {
1062		self.finalized_blocks.push((block, justification));
1063		Ok(())
1064	}
1065
1066	fn mark_head(&mut self, hash: Block::Hash) -> ClientResult<()> {
1067		assert!(self.set_head.is_none(), "Only one set head per operation is allowed");
1068		self.set_head = Some(hash);
1069		Ok(())
1070	}
1071
1072	fn update_transaction_index(&mut self, index_ops: Vec<IndexOperation>) -> ClientResult<()> {
1073		self.index_ops = index_ops;
1074		Ok(())
1075	}
1076
1077	fn set_create_gap(&mut self, create_gap: bool) {
1078		self.create_gap = create_gap;
1079	}
1080}
1081
/// Pairs the raw key-value database with the state-db pruning layer.
struct StorageDb<Block: BlockT> {
	pub db: Arc<dyn Database<DbHash>>,
	pub state_db: StateDb<Block::Hash, Vec<u8>, StateMetaDb>,
	// When the backing database cannot reference-count identical trie nodes,
	// node keys are prefixed to keep them distinct (see `Storage::get` below).
	prefix_keys: bool,
}
1087
1088impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for StorageDb<Block> {
1089	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
1090		if self.prefix_keys {
1091			let key = prefixed_key::<HashingFor<Block>>(key, prefix);
1092			self.state_db.get(&key, self)
1093		} else {
1094			self.state_db.get(key.as_ref(), self)
1095		}
1096		.map_err(|e| format!("Database backend error: {e:?}"))
1097	}
1098}
1099
impl<Block: BlockT> sc_state_db::NodeDb for StorageDb<Block> {
	type Error = io::Error;
	type Key = [u8];

	// Fetch a raw value from the STATE column of the backing database.
	fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		Ok(self.db.get(columns::STATE, key))
	}
}
1108
/// In-memory storage holding a genesis state together with its storage root.
struct DbGenesisStorage<Block: BlockT> {
	root: Block::Hash,
	storage: PrefixedMemoryDB<HashingFor<Block>>,
}
1113
1114impl<Block: BlockT> DbGenesisStorage<Block> {
1115	pub fn new(root: Block::Hash, storage: PrefixedMemoryDB<HashingFor<Block>>) -> Self {
1116		DbGenesisStorage { root, storage }
1117	}
1118}
1119
impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for DbGenesisStorage<Block> {
	// Reads come straight from the in-memory `PrefixedMemoryDB`, so this
	// lookup itself cannot fail.
	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
		use hash_db::HashDB;
		Ok(self.storage.get(key, prefix))
	}
}
1126
/// Storage backend without any data; holds the root hash of an empty trie.
struct EmptyStorage<Block: BlockT>(pub Block::Hash);
1128
1129impl<Block: BlockT> EmptyStorage<Block> {
1130	pub fn new() -> Self {
1131		let mut root = Block::Hash::default();
1132		let mut mdb = MemoryDB::<HashingFor<Block>>::default();
1133		// both triedbmut are the same on empty storage.
1134		sp_trie::trie_types::TrieDBMutBuilderV1::<HashingFor<Block>>::new(&mut mdb, &mut root)
1135			.build();
1136		EmptyStorage(root)
1137	}
1138}
1139
impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for EmptyStorage<Block> {
	// There is never any data, so every lookup succeeds with `None`.
	fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result<Option<DBValue>, String> {
		Ok(None)
	}
}
1145
/// Frozen `value` at time `at`.
///
/// Used as inner structure under lock in `FrozenForDuration`.
struct Frozen<T: Clone> {
	// Instant at which `value` was last refreshed.
	at: std::time::Instant,
	// `None` until the first value has been produced.
	value: Option<T>,
}
1153
/// Some value frozen for period of time.
///
/// If time `duration` not passed since the value was instantiated,
/// current frozen value is returned. Otherwise, you have to provide
/// a new value which will be again frozen for `duration`.
pub(crate) struct FrozenForDuration<T: Clone> {
	// How long a produced value stays fresh.
	duration: std::time::Duration,
	// Cached value plus its refresh timestamp, guarded by a mutex.
	value: parking_lot::Mutex<Frozen<T>>,
}
1163
1164impl<T: Clone> FrozenForDuration<T> {
1165	fn new(duration: std::time::Duration) -> Self {
1166		Self { duration, value: Frozen { at: std::time::Instant::now(), value: None }.into() }
1167	}
1168
1169	fn take_or_else<F>(&self, f: F) -> T
1170	where
1171		F: FnOnce() -> T,
1172	{
1173		let mut lock = self.value.lock();
1174		let now = std::time::Instant::now();
1175		match lock.value.as_ref() {
1176			Some(value) if now.saturating_duration_since(lock.at) <= self.duration => value.clone(),
1177			_ => {
1178				let new_value = f();
1179				lock.at = now;
1180				lock.value = Some(new_value.clone());
1181				new_value
1182			},
1183		}
1184	}
1185}
1186
/// Disk backend.
///
/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all
/// blocks. Otherwise, trie nodes are kept only from some recent blocks.
pub struct Backend<Block: BlockT> {
	// Key-value database combined with the state-db pruning layer.
	storage: Arc<StorageDb<Block>>,
	offchain_storage: offchain::LocalStorage,
	blockchain: BlockchainDb<Block>,
	// How many blocks behind best a non-finalized block must be before it is
	// force-canonicalized (see `force_delayed_canonicalize`).
	canonicalization_delay: u64,
	import_lock: Arc<RwLock<()>>,
	// True when the state pruning mode is an archive variant.
	is_archive: bool,
	blocks_pruning: BlocksPruning,
	// I/O statistics, refreshed at most once per second (see `from_database`).
	io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>,
	state_usage: Arc<StateUsageStats>,
	// Genesis state kept only in memory when it was not committed to the DB.
	genesis_state: RwLock<Option<Arc<DbGenesisStorage<Block>>>>,
	shared_trie_cache: Option<sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
	// NOTE(review): copied from `DatabaseSettings`; consumers of these filters
	// are not visible in this chunk — presumably consulted during pruning.
	pruning_filters: Vec<Arc<dyn PruningFilter>>,
}
1205
1206impl<Block: BlockT> Backend<Block> {
1207	/// Create a new instance of database backend.
1208	///
1209	/// The pruning window is how old a block must be before the state is pruned.
1210	pub fn new(db_config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult<Self> {
1211		use utils::OpenDbError;
1212
1213		let db_source = &db_config.source;
1214
1215		let (needs_init, db) =
1216			match crate::utils::open_database::<Block>(db_source, DatabaseType::Full, false) {
1217				Ok(db) => (false, db),
1218				Err(OpenDbError::DoesNotExist) => {
1219					let db =
1220						crate::utils::open_database::<Block>(db_source, DatabaseType::Full, true)?;
1221					(true, db)
1222				},
1223				Err(as_is) => return Err(as_is.into()),
1224			};
1225
1226		Self::from_database(db as Arc<_>, canonicalization_delay, &db_config, needs_init)
1227	}
1228
1229	/// Reset the shared trie cache.
1230	pub fn reset_trie_cache(&self) {
1231		if let Some(cache) = &self.shared_trie_cache {
1232			cache.reset();
1233		}
1234	}
1235
1236	/// Create new memory-backed client backend for tests.
1237	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self {
		// Keep only the most recent `blocks_pruning` blocks.
		Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay)
	}
1241
1242	/// Create new memory-backed client backend for tests with custom pruning filters.
1243	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test_with_pruning_filters(
		blocks_pruning: u32,
		canonicalization_delay: u64,
		pruning_filters: Vec<Arc<dyn PruningFilter>>,
	) -> Self {
		// Keep only the most recent `blocks_pruning` blocks and forward the
		// custom pruning filters.
		Self::new_test_with_tx_storage_and_filters(
			BlocksPruning::Some(blocks_pruning),
			canonicalization_delay,
			pruning_filters,
		)
	}
1255
1256	/// Create new memory-backed client backend for tests.
1257	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test_with_tx_storage(
		blocks_pruning: BlocksPruning,
		canonicalization_delay: u64,
	) -> Self {
		// Same as `new_test_with_tx_storage_and_filters`, without any filters.
		Self::new_test_with_tx_storage_and_filters(
			blocks_pruning,
			canonicalization_delay,
			Default::default(),
		)
	}
1268
1269	/// Create new memory-backed client backend for tests with custom pruning filters.
1270	#[cfg(any(test, feature = "test-helpers"))]
1271	pub fn new_test_with_tx_storage_and_filters(
1272		blocks_pruning: BlocksPruning,
1273		canonicalization_delay: u64,
1274		pruning_filters: Vec<Arc<dyn PruningFilter>>,
1275	) -> Self {
1276		let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS);
1277		let db = sp_database::as_database(db);
1278		let state_pruning = match blocks_pruning {
1279			BlocksPruning::KeepAll => PruningMode::ArchiveAll,
1280			BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical,
1281			BlocksPruning::Some(n) => PruningMode::blocks_pruning(n),
1282		};
1283		let db_setting = DatabaseSettings {
1284			trie_cache_maximum_size: Some(16 * 1024 * 1024),
1285			state_pruning: Some(state_pruning),
1286			source: DatabaseSource::Custom { db, require_create_flag: true },
1287			blocks_pruning,
1288			pruning_filters,
1289			metrics_registry: None,
1290		};
1291
1292		Self::new(db_setting, canonicalization_delay).expect("failed to create test-db")
1293	}
1294
1295	/// Expose the Database that is used by this backend.
1296	/// The second argument is the Column that stores the State.
1297	///
1298	/// Should only be needed for benchmarking.
1299	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_db(&self) -> (Arc<dyn sp_database::Database<DbHash>>, sp_database::ColumnId) {
		// Raw database handle plus the column id holding state data.
		(self.storage.db.clone(), columns::STATE)
	}
1303
1304	/// Expose the Storage that is used by this backend.
1305	///
1306	/// Should only be needed for benchmarking.
1307	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_storage(&self) -> Arc<dyn sp_state_machine::Storage<HashingFor<Block>>> {
		// Clones the `Arc`, not the underlying storage.
		self.storage.clone()
	}
1311
1312	/// Expose the shared trie cache that is used by this backend.
1313	///
1314	/// Should only be needed for benchmarking.
1315	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_shared_trie_cache(
		&self,
	) -> Option<sp_trie::cache::SharedTrieCache<HashingFor<Block>>> {
		// `None` when the backend was configured without a trie cache.
		self.shared_trie_cache.clone()
	}
1321
	// Build the backend from an already-opened database, initializing the
	// state-db, blockchain metadata, offchain storage and (optionally) the
	// shared trie cache, then committing any pending init writes.
	fn from_database(
		db: Arc<dyn Database<DbHash>>,
		canonicalization_delay: u64,
		config: &DatabaseSettings,
		should_init: bool,
	) -> ClientResult<Self> {
		let mut db_init_transaction = Transaction::new();

		let requested_state_pruning = config.state_pruning.clone();
		let state_meta_db = StateMetaDb(db.clone());
		let map_e = sp_blockchain::Error::from_state_db;

		// Opening the state-db may produce writes (e.g. recorded pruning mode)
		// which are collected into the init transaction below.
		let (state_db_init_commit_set, state_db) = StateDb::open(
			state_meta_db,
			requested_state_pruning,
			!db.supports_ref_counting(),
			should_init,
		)
		.map_err(map_e)?;

		apply_state_commit(&mut db_init_transaction, state_db_init_commit_set);

		let state_pruning_used = state_db.pruning_mode();
		let is_archive_pruning = state_pruning_used.is_archive();
		let blockchain = BlockchainDb::new(db.clone())?;

		// Without ref counting support in the DB, trie node keys are prefixed.
		let storage_db =
			StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() };

		let offchain_storage = offchain::LocalStorage::new(db.clone());

		let shared_trie_cache = config.trie_cache_maximum_size.map(|maximum_size| {
			let system_memory = sysinfo::System::new_all();
			let used_memory = system_memory.used_memory();
			let total_memory = system_memory.total_memory();

			// NOTE(review): `total_memory - used_memory` would underflow if
			// sysinfo ever reported used > total — confirm saturating_sub is
			// not needed here.
			debug!("Initializing shared trie cache with size {} bytes, {}% of total memory", maximum_size, (maximum_size as f64 / total_memory as f64 * 100.0));
			if maximum_size as u64 > total_memory - used_memory {
				warn!(
					"Not enough memory to initialize shared trie cache. Cache size: {} bytes. System memory: used {} bytes, total {} bytes",
					maximum_size, used_memory, total_memory,
				);
			}

			SharedTrieCache::new(sp_trie::cache::CacheSize::new(maximum_size), config.metrics_registry.as_ref())
		});

		let backend = Backend {
			storage: Arc::new(storage_db),
			offchain_storage,
			blockchain,
			canonicalization_delay,
			import_lock: Default::default(),
			is_archive: is_archive_pruning,
			io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)),
			state_usage: Arc::new(StateUsageStats::new()),
			blocks_pruning: config.blocks_pruning,
			genesis_state: RwLock::new(None),
			shared_trie_cache,
			pruning_filters: config.pruning_filters.clone(),
		};

		// Older DB versions have no last state key. Check if the state is available and set it.
		let info = backend.blockchain.info();
		if info.finalized_state.is_none() &&
			info.finalized_hash != Default::default() &&
			sc_client_api::Backend::have_state_at(
				&backend,
				info.finalized_hash,
				info.finalized_number,
			) {
			backend.blockchain.update_meta(MetaUpdate {
				hash: info.finalized_hash,
				number: info.finalized_number,
				is_best: info.finalized_hash == info.best_hash,
				is_finalized: true,
				with_state: true,
			});
		}

		// Non archive nodes cannot fill the missing block gap with bodies.
		// If the gap is present, it means that every restart will try to fill the gap:
		// - a block request is made for each and every block in the gap
		// - the request is fulfilled putting pressure on the network and other nodes
		// - upon receiving the block, the block cannot be executed since the state
		//  of the parent block might have been discarded
		// - then the sync engine closes the gap in memory, but never in DB.
		//
		// This leads to inefficient syncing and high CPU usage on every restart. To mitigate this,
		// remove the gap from the DB if we detect it and the current node is not an archive.
		match (backend.is_archive, info.block_gap) {
			(false, Some(gap)) if matches!(gap.gap_type, BlockGapType::MissingBody) => {
				warn!(
					"Detected a missing body gap for non-archive nodes. Removing the gap={:?}",
					gap
				);

				db_init_transaction.remove(columns::META, meta_keys::BLOCK_GAP);
				db_init_transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
				backend.blockchain.update_block_gap(None);
			},
			_ => {},
		}

		// Persist everything collected during initialization in one commit.
		db.commit(db_init_transaction)?;

		Ok(backend)
	}
1430
1431	/// Handle setting head within a transaction. `route_to` should be the last
1432	/// block that existed in the database. `best_to` should be the best block
1433	/// to be set.
1434	///
1435	/// In the case where the new best block is a block to be imported, `route_to`
1436	/// should be the parent of `best_to`. In the case where we set an existing block
1437	/// to be best, `route_to` should equal to `best_to`.
	// Returns the (enacted, retracted) block hashes along the tree route from
	// the old best block to `route_to`.
	fn set_head_with_transaction(
		&self,
		transaction: &mut Transaction<DbHash>,
		route_to: Block::Hash,
		best_to: (NumberFor<Block>, Block::Hash),
	) -> ClientResult<(Vec<Block::Hash>, Vec<Block::Hash>)> {
		let mut enacted = Vec::default();
		let mut retracted = Vec::default();

		let (best_number, best_hash) = best_to;

		let meta = self.blockchain.meta.read();

		// Refuse to move the head further back than the canonicalization
		// delay allows — that state may already be discarded.
		if meta.best_number.saturating_sub(best_number).saturated_into::<u64>() >
			self.canonicalization_delay
		{
			return Err(sp_blockchain::Error::SetHeadTooOld);
		}

		let parent_exists =
			self.blockchain.status(route_to)? == sp_blockchain::BlockStatus::InChain;

		// Cannot find tree route with empty DB or when imported a detached block.
		if meta.best_hash != Default::default() && parent_exists {
			let tree_route = sp_blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?;

			// uncanonicalize: check safety violations and ensure the numbers no longer
			// point to these block hashes in the key mapping.
			for r in tree_route.retracted() {
				// Retracting a finalized block is a safety violation.
				if r.hash == meta.finalized_hash {
					warn!(
						"Potential safety failure: reverting finalized block {:?}",
						(&r.number, &r.hash)
					);

					return Err(sp_blockchain::Error::NotInFinalizedChain);
				}

				retracted.push(r.hash);
				utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number)?;
			}

			// canonicalize: set the number lookup to map to this block's hash.
			for e in tree_route.enacted() {
				enacted.push(e.hash);
				utils::insert_number_to_key_mapping(
					transaction,
					columns::KEY_LOOKUP,
					e.number,
					e.hash,
				)?;
			}
		}

		// Point the best-block meta key and the number lookup at the new head.
		let lookup_key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?;
		transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key);
		utils::insert_number_to_key_mapping(
			transaction,
			columns::KEY_LOOKUP,
			best_number,
			best_hash,
		)?;

		Ok((enacted, retracted))
	}
1503
1504	fn ensure_sequential_finalization(
1505		&self,
1506		header: &Block::Header,
1507		last_finalized: Option<Block::Hash>,
1508	) -> ClientResult<()> {
1509		let last_finalized =
1510			last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash);
1511		if last_finalized != self.blockchain.meta.read().genesis_hash &&
1512			*header.parent_hash() != last_finalized
1513		{
1514			return Err(sp_blockchain::Error::NonSequentialFinalization(format!(
1515				"Last finalized {last_finalized:?} not parent of {:?}",
1516				header.hash()
1517			)));
1518		}
1519		Ok(())
1520	}
1521
1522	/// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls
1523	/// for performance reasons.
	// Record finalization of `hash` in `transaction` and return the resulting
	// meta update. Also stores the justification (if any) and remembers it in
	// `current_transaction_justifications` for later steps of the same commit.
	fn finalize_block_with_transaction(
		&self,
		transaction: &mut Transaction<DbHash>,
		hash: Block::Hash,
		header: &Block::Header,
		last_finalized: Option<Block::Hash>,
		justification: Option<Justification>,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
		remove_displaced: bool,
	) -> ClientResult<MetaUpdate<Block>> {
		// TODO: ensure best chain contains this block.
		let number = *header.number();
		self.ensure_sequential_finalization(header, last_finalized)?;
		// Whether we still hold the state for this block decides `with_state`.
		let with_state = sc_client_api::Backend::have_state_at(self, hash, number);

		self.note_finalized(
			transaction,
			header,
			hash,
			with_state,
			current_transaction_justifications,
			remove_displaced,
		)?;

		if let Some(justification) = justification {
			// Persist the justification keyed by number+hash lookup key.
			transaction.set_from_vec(
				columns::JUSTIFICATIONS,
				&utils::number_and_hash_to_lookup_key(number, hash)?,
				Justifications::from(justification.clone()).encode(),
			);
			current_transaction_justifications.insert(hash, justification);
		}
		Ok(MetaUpdate { hash, number, is_best: false, is_finalized: true, with_state })
	}
1558
1559	// performs forced canonicalization with a delay after importing a non-finalized block.
1560	fn force_delayed_canonicalize(
1561		&self,
1562		transaction: &mut Transaction<DbHash>,
1563	) -> ClientResult<()> {
1564		let best_canonical = match self.storage.state_db.last_canonicalized() {
1565			LastCanonicalized::None => 0,
1566			LastCanonicalized::Block(b) => b,
1567			// Nothing needs to be done when canonicalization is not happening.
1568			LastCanonicalized::NotCanonicalizing => return Ok(()),
1569		};
1570
1571		let info = self.blockchain.info();
1572		let best_number: u64 = self.blockchain.info().best_number.saturated_into();
1573
1574		for to_canonicalize in
1575			best_canonical + 1..=best_number.saturating_sub(self.canonicalization_delay)
1576		{
1577			let hash_to_canonicalize = sc_client_api::blockchain::HeaderBackend::hash(
1578				&self.blockchain,
1579				to_canonicalize.saturated_into(),
1580			)?
1581			.ok_or_else(|| {
1582				let best_hash = info.best_hash;
1583
1584				sp_blockchain::Error::Backend(format!(
1585					"Can't canonicalize missing block number #{to_canonicalize} when for best block {best_hash:?} (#{best_number})",
1586				))
1587			})?;
1588
1589			if !sc_client_api::Backend::have_state_at(
1590				self,
1591				hash_to_canonicalize,
1592				to_canonicalize.saturated_into(),
1593			) {
1594				return Ok(());
1595			}
1596
1597			trace!(target: "db", "Canonicalize block #{to_canonicalize} ({hash_to_canonicalize:?})");
1598			let commit = self.storage.state_db.canonicalize_block(&hash_to_canonicalize).map_err(
1599				sp_blockchain::Error::from_state_db::<
1600					sc_state_db::Error<sp_database::error::DatabaseError>,
1601				>,
1602			)?;
1603			apply_state_commit(transaction, commit);
1604		}
1605
1606		Ok(())
1607	}
1608
1609	fn try_commit_operation(&self, mut operation: BlockImportOperation<Block>) -> ClientResult<()> {
1610		let mut transaction = Transaction::new();
1611
1612		operation.apply_aux(&mut transaction);
1613		operation.apply_offchain(&mut transaction);
1614
1615		let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len());
1616		let (best_num, mut last_finalized_hash, mut last_finalized_num, mut block_gap) = {
1617			let meta = self.blockchain.meta.read();
1618			(meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap)
1619		};
1620
1621		let mut block_gap_updated = false;
1622
1623		let mut current_transaction_justifications: HashMap<Block::Hash, Justification> =
1624			HashMap::new();
1625		let mut finalized_blocks = operation.finalized_blocks.into_iter().peekable();
1626		while let Some((block_hash, justification)) = finalized_blocks.next() {
1627			let block_header = self.blockchain.expect_header(block_hash)?;
1628			meta_updates.push(self.finalize_block_with_transaction(
1629				&mut transaction,
1630				block_hash,
1631				&block_header,
1632				Some(last_finalized_hash),
1633				justification,
1634				&mut current_transaction_justifications,
1635				finalized_blocks.peek().is_none(),
1636			)?);
1637			last_finalized_hash = block_hash;
1638			last_finalized_num = *block_header.number();
1639		}
1640
1641		let imported = if let Some(pending_block) = operation.pending_block {
1642			let hash = pending_block.header.hash();
1643
1644			let parent_hash = *pending_block.header.parent_hash();
1645			let number = *pending_block.header.number();
1646			let highest_leaf = self
1647				.blockchain
1648				.leaves
1649				.read()
1650				.highest_leaf()
1651				.map(|(n, _)| n)
1652				.unwrap_or(Zero::zero());
1653			let header_exists_in_db =
1654				number <= highest_leaf && self.blockchain.header(hash)?.is_some();
1655			// Body in DB (not incoming block) - needed to update gap when adding body to existing
1656			// header.
1657			let body_exists_in_db = self.blockchain.body(hash)?.is_some();
1658			// Incoming block has body - used for fast sync gap handling.
1659			let incoming_has_body = pending_block.body.is_some();
1660
1661			// blocks are keyed by number + hash.
1662			let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?;
1663
1664			if pending_block.leaf_state.is_best() {
1665				self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?;
1666			};
1667
1668			utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?;
1669
1670			transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode());
1671			if let Some(body) = pending_block.body {
1672				// If we have any index operations we save block in the new format with indexed
1673				// extrinsic headers Otherwise we save the body as a single blob.
1674				if operation.index_ops.is_empty() {
1675					transaction.set_from_vec(columns::BODY, &lookup_key, body.encode());
1676				} else {
1677					let body =
1678						apply_index_ops::<Block>(&mut transaction, body, operation.index_ops);
1679					transaction.set_from_vec(columns::BODY_INDEX, &lookup_key, body);
1680				}
1681			}
1682			if let Some(body) = pending_block.indexed_body {
1683				apply_indexed_body::<Block>(&mut transaction, body);
1684			}
1685			if let Some(justifications) = pending_block.justifications {
1686				transaction.set_from_vec(
1687					columns::JUSTIFICATIONS,
1688					&lookup_key,
1689					justifications.encode(),
1690				);
1691			}
1692
1693			if number.is_zero() {
1694				transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
1695
1696				if operation.commit_state {
1697					transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key);
1698				} else {
1699					// When we don't want to commit the genesis state, we still preserve it in
1700					// memory to bootstrap consensus. It is queried for an initial list of
1701					// authorities, etc.
1702					*self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new(
1703						*pending_block.header.state_root(),
1704						operation.db_updates.clone(),
1705					)));
1706				}
1707			}
1708
1709			let finalized = if operation.commit_state {
1710				let mut changeset: sc_state_db::ChangeSet<Vec<u8>> =
1711					sc_state_db::ChangeSet::default();
1712				let mut ops: u64 = 0;
1713				let mut bytes: u64 = 0;
1714				let mut removal: u64 = 0;
1715				let mut bytes_removal: u64 = 0;
1716				for (mut key, (val, rc)) in operation.db_updates.drain() {
1717					self.storage.db.sanitize_key(&mut key);
1718					if rc > 0 {
1719						ops += 1;
1720						bytes += key.len() as u64 + val.len() as u64;
1721						if rc == 1 {
1722							changeset.inserted.push((key, val.to_vec()));
1723						} else {
1724							changeset.inserted.push((key.clone(), val.to_vec()));
1725							for _ in 0..rc - 1 {
1726								changeset.inserted.push((key.clone(), Default::default()));
1727							}
1728						}
1729					} else if rc < 0 {
1730						removal += 1;
1731						bytes_removal += key.len() as u64;
1732						if rc == -1 {
1733							changeset.deleted.push(key);
1734						} else {
1735							for _ in 0..-rc {
1736								changeset.deleted.push(key.clone());
1737							}
1738						}
1739					}
1740				}
1741				self.state_usage.tally_writes_nodes(ops, bytes);
1742				self.state_usage.tally_removed_nodes(removal, bytes_removal);
1743
1744				let mut ops: u64 = 0;
1745				let mut bytes: u64 = 0;
1746				for (key, value) in operation
1747					.storage_updates
1748					.iter()
1749					.chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter()))
1750				{
1751					ops += 1;
1752					bytes += key.len() as u64;
1753					if let Some(v) = value.as_ref() {
1754						bytes += v.len() as u64;
1755					}
1756				}
1757				self.state_usage.tally_writes(ops, bytes);
1758				let number_u64 = number.saturated_into::<u64>();
1759				let commit = self
1760					.storage
1761					.state_db
1762					.insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset)
1763					.map_err(|e: sc_state_db::Error<sp_database::error::DatabaseError>| {
1764						sp_blockchain::Error::from_state_db(e)
1765					})?;
1766				apply_state_commit(&mut transaction, commit);
1767				if number <= last_finalized_num {
1768					// Canonicalize in the db when re-importing existing blocks with state.
1769					let commit = self.storage.state_db.canonicalize_block(&hash).map_err(
1770						sp_blockchain::Error::from_state_db::<
1771							sc_state_db::Error<sp_database::error::DatabaseError>,
1772						>,
1773					)?;
1774					apply_state_commit(&mut transaction, commit);
1775					meta_updates.push(MetaUpdate {
1776						hash,
1777						number,
1778						is_best: false,
1779						is_finalized: true,
1780						with_state: true,
1781					});
1782				}
1783
1784				// Check if need to finalize. Genesis is always finalized instantly.
1785				let finalized = number_u64 == 0 || pending_block.leaf_state.is_final();
1786				finalized
1787			} else {
1788				(number.is_zero() && last_finalized_num.is_zero()) ||
1789					pending_block.leaf_state.is_final()
1790			};
1791
1792			let header = &pending_block.header;
1793			let is_best = pending_block.leaf_state.is_best();
1794			trace!(
1795				target: "db",
1796				"DB Commit {hash:?} ({number}), best={is_best}, state={}, header_in_db={header_exists_in_db} body_in_db={body_exists_in_db} incoming_body={incoming_has_body}, finalized={finalized}",
1797				operation.commit_state,
1798			);
1799
1800			self.state_usage.merge_sm(operation.old_state.usage_info());
1801
1802			// release state reference so that it can be finalized
1803			// VERY IMPORTANT
1804			drop(operation.old_state);
1805
1806			if finalized {
1807				// TODO: ensure best chain contains this block.
1808				self.ensure_sequential_finalization(header, Some(last_finalized_hash))?;
1809				let mut current_transaction_justifications = HashMap::new();
1810				self.note_finalized(
1811					&mut transaction,
1812					header,
1813					hash,
1814					operation.commit_state,
1815					&mut current_transaction_justifications,
1816					true,
1817				)?;
1818			} else {
1819				// canonicalize blocks which are old enough, regardless of finality.
1820				self.force_delayed_canonicalize(&mut transaction)?
1821			}
1822
1823			if !header_exists_in_db {
1824				// Add a new leaf if the block has the potential to be finalized.
1825				if pending_block.register_as_leaf &&
1826					(number > last_finalized_num || last_finalized_num.is_zero())
1827				{
1828					let mut leaves = self.blockchain.leaves.write();
1829					leaves.import(hash, number, parent_hash);
1830					leaves.prepare_transaction(
1831						&mut transaction,
1832						columns::META,
1833						meta_keys::LEAF_PREFIX,
1834					);
1835				}
1836
1837				let mut children = children::read_children(
1838					&*self.storage.db,
1839					columns::META,
1840					meta_keys::CHILDREN_PREFIX,
1841					parent_hash,
1842				)?;
1843				if !children.contains(&hash) {
1844					children.push(hash);
1845					children::write_children(
1846						&mut transaction,
1847						columns::META,
1848						meta_keys::CHILDREN_PREFIX,
1849						parent_hash,
1850						children,
1851					);
1852				}
1853			}
1854
1855			let should_check_block_gap = !header_exists_in_db || !body_exists_in_db;
1856			debug!(
1857				target: "db",
1858				"should_check_block_gap = {should_check_block_gap}",
1859			);
1860
1861			if should_check_block_gap {
1862				let update_gap =
1863					|transaction: &mut Transaction<DbHash>,
1864					 new_gap: BlockGap<NumberFor<Block>>,
1865					 block_gap: &mut Option<BlockGap<NumberFor<Block>>>| {
1866						transaction.set(columns::META, meta_keys::BLOCK_GAP, &new_gap.encode());
1867						transaction.set(
1868							columns::META,
1869							meta_keys::BLOCK_GAP_VERSION,
1870							&BLOCK_GAP_CURRENT_VERSION.encode(),
1871						);
1872						block_gap.replace(new_gap);
1873						debug!(target: "db", "Update block gap. {block_gap:?}");
1874					};
1875
1876				let remove_gap =
1877					|transaction: &mut Transaction<DbHash>,
1878					 block_gap: &mut Option<BlockGap<NumberFor<Block>>>| {
1879						transaction.remove(columns::META, meta_keys::BLOCK_GAP);
1880						transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
1881						*block_gap = None;
1882						debug!(target: "db", "Removed block gap.");
1883					};
1884
1885				if let Some(mut gap) = block_gap {
1886					match gap.gap_type {
1887						BlockGapType::MissingHeaderAndBody => {
1888							// Handle blocks at gap start or immediately following (possibly
1889							// indicating blocks already imported during warp sync where
1890							// start was not updated).
1891							if number == gap.start {
1892								gap.start = number + One::one();
1893								utils::insert_number_to_key_mapping(
1894									&mut transaction,
1895									columns::KEY_LOOKUP,
1896									number,
1897									hash,
1898								)?;
1899								if gap.start > gap.end {
1900									remove_gap(&mut transaction, &mut block_gap);
1901								} else {
1902									update_gap(&mut transaction, gap, &mut block_gap);
1903								}
1904								block_gap_updated = true;
1905							}
1906						},
1907						BlockGapType::MissingBody => {
1908							// Gap increased when syncing the header chain during fast sync.
1909							if number == gap.end + One::one() && !incoming_has_body {
1910								gap.end += One::one();
1911								utils::insert_number_to_key_mapping(
1912									&mut transaction,
1913									columns::KEY_LOOKUP,
1914									number,
1915									hash,
1916								)?;
1917								update_gap(&mut transaction, gap, &mut block_gap);
1918								block_gap_updated = true;
1919							// Gap decreased when downloading the full blocks.
1920							} else if number == gap.start && incoming_has_body {
1921								gap.start += One::one();
1922								if gap.start > gap.end {
1923									remove_gap(&mut transaction, &mut block_gap);
1924								} else {
1925									update_gap(&mut transaction, gap, &mut block_gap);
1926								}
1927								block_gap_updated = true;
1928							}
1929						},
1930					}
1931				} else if operation.create_gap {
1932					if number > best_num + One::one() &&
1933						self.blockchain.header(parent_hash)?.is_none()
1934					{
1935						let gap = BlockGap {
1936							start: best_num + One::one(),
1937							end: number - One::one(),
1938							gap_type: BlockGapType::MissingHeaderAndBody,
1939						};
1940						update_gap(&mut transaction, gap, &mut block_gap);
1941						block_gap_updated = true;
1942						debug!(target: "db", "Detected block gap (warp sync) {block_gap:?}");
1943					} else if number == best_num + One::one() &&
1944						self.blockchain.header(parent_hash)?.is_some() &&
1945						!incoming_has_body
1946					{
1947						let gap = BlockGap {
1948							start: number,
1949							end: number,
1950							gap_type: BlockGapType::MissingBody,
1951						};
1952						update_gap(&mut transaction, gap, &mut block_gap);
1953						block_gap_updated = true;
1954						debug!(target: "db", "Detected block gap (fast sync) {block_gap:?}");
1955					}
1956				}
1957			}
1958
1959			meta_updates.push(MetaUpdate {
1960				hash,
1961				number,
1962				is_best: pending_block.leaf_state.is_best(),
1963				is_finalized: finalized,
1964				with_state: operation.commit_state,
1965			});
1966			Some((pending_block.header, hash))
1967		} else {
1968			None
1969		};
1970
1971		if let Some(set_head) = operation.set_head {
1972			if let Some(header) =
1973				sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)?
1974			{
1975				let number = header.number();
1976				let hash = header.hash();
1977
1978				self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?;
1979
1980				meta_updates.push(MetaUpdate {
1981					hash,
1982					number: *number,
1983					is_best: true,
1984					is_finalized: false,
1985					with_state: false,
1986				});
1987			} else {
1988				return Err(sp_blockchain::Error::UnknownBlock(format!(
1989					"Cannot set head {set_head:?}",
1990				)));
1991			}
1992		}
1993
1994		self.storage.db.commit(transaction)?;
1995
1996		// `reset_storage == true` means the entire state got replaced.
1997		// In this case we optimize the `STATE` column to improve read performance.
1998		if operation.reset_storage {
1999			if let Err(e) = self.storage.db.optimize_db_col(columns::STATE) {
2000				warn!(target: "db", "Failed to optimize database after state import: {e:?}");
2001			}
2002		}
2003
2004		// Apply all in-memory state changes.
2005		// Code beyond this point can't fail.
2006
2007		if let Some((header, hash)) = imported {
2008			trace!(target: "db", "DB Commit done {hash:?}");
2009			let header_metadata = CachedHeaderMetadata::from(&header);
2010			self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata);
2011			cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header));
2012		}
2013
2014		for m in meta_updates {
2015			self.blockchain.update_meta(m);
2016		}
2017		if block_gap_updated {
2018			self.blockchain.update_block_gap(block_gap);
2019		}
2020
2021		Ok(())
2022	}
2023
	/// Write stuff to a transaction after a new block is finalized. This canonicalizes finalized
	/// blocks. Fails if called with a block which was not a child of the last finalized block.
	///
	/// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls
	/// for performance reasons.
	fn note_finalized(
		&self,
		transaction: &mut Transaction<DbHash>,
		f_header: &Block::Header,
		f_hash: Block::Hash,
		with_state: bool,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
		remove_displaced: bool,
	) -> ClientResult<()> {
		let f_num = *f_header.number();

		// Update the `FINALIZED_BLOCK` (and, when the block carries state, `FINALIZED_STATE`)
		// pointers in the META column.
		let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash)?;
		if with_state {
			transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key.clone());
		}
		transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key);

		// Only canonicalize if the state-db has not already canonicalized past this block.
		let requires_canonicalization = match self.storage.state_db.last_canonicalized() {
			LastCanonicalized::None => true,
			LastCanonicalized::Block(b) => f_num.saturated_into::<u64>() > b,
			LastCanonicalized::NotCanonicalizing => false,
		};

		if requires_canonicalization && sc_client_api::Backend::have_state_at(self, f_hash, f_num) {
			let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err(
				sp_blockchain::Error::from_state_db::<
					sc_state_db::Error<sp_database::error::DatabaseError>,
				>,
			)?;
			apply_state_commit(transaction, commit);
		}

		if remove_displaced {
			// Leaves (and the branch blocks leading to them) that do not descend from the
			// finalized block can never become canonical; drop them from the in-memory leaf
			// set and, unless we keep all blocks, from the database.
			let new_displaced = self.blockchain.displaced_leaves_after_finalizing(
				f_hash,
				f_num,
				*f_header.parent_hash(),
			)?;

			self.blockchain.leaves.write().remove_displaced_leaves(FinalizationOutcome::new(
				new_displaced.displaced_leaves.iter().copied(),
			));

			if !matches!(self.blocks_pruning, BlocksPruning::KeepAll) {
				self.prune_displaced_branches(transaction, &new_displaced)?;
			}
		}

		self.prune_blocks(transaction, f_num, current_transaction_justifications)?;

		Ok(())
	}
2080
	/// Prune the body/justifications of the block that falls out of the
	/// `BlocksPruning::Some(n)` retention window now that `finalized_number` is finalized.
	///
	/// Pinned blocks have their data copied into the in-memory pinned-block cache before
	/// the on-disk copies are queued for deletion. Pruning filters may veto the pruning
	/// entirely based on the block's justifications.
	fn prune_blocks(
		&self,
		transaction: &mut Transaction<DbHash>,
		finalized_number: NumberFor<Block>,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
	) -> ClientResult<()> {
		if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning {
			// Always keep the last finalized block
			let keep = std::cmp::max(blocks_pruning, 1);
			if finalized_number >= keep.into() {
				let number = finalized_number.saturating_sub(keep.into());

				// Before we prune a block, check if it is pinned
				if let Some(hash) = self.blockchain.hash(number)? {
					// Check if any pruning filter wants to preserve this block.
					// We need to check both the current transaction justifications (not yet in DB)
					// and the DB itself (for justifications from previous transactions).
					if !self.pruning_filters.is_empty() {
						let justifications = match current_transaction_justifications.get(&hash) {
							Some(j) => Some(Justifications::from(j.clone())),
							None => self.blockchain.justifications(hash)?,
						};

						let should_retain = justifications
							.map(|j| self.pruning_filters.iter().any(|f| f.should_retain(&j)))
							.unwrap_or(false);

						// We can just return here, pinning can be ignored since the block will
						// remain in the DB.
						if should_retain {
							debug!(
								target: "db",
								"Preserving block #{number} ({hash}) due to keep predicate match"
							);
							return Ok(());
						}
					}

					// Keep the body available to pin holders after the on-disk copy is gone.
					self.blockchain.insert_persisted_body_if_pinned(hash)?;

					// If the block was finalized in this transaction, it will not be in the db
					// yet.
					if let Some(justification) = current_transaction_justifications.remove(&hash) {
						self.blockchain.insert_justifications_if_pinned(hash, justification);
					} else {
						self.blockchain.insert_persisted_justifications_if_pinned(hash)?;
					}
				};

				self.prune_block(transaction, BlockId::<Block>::number(number))?;
			}
		}
		Ok(())
	}
2135
2136	fn prune_displaced_branches(
2137		&self,
2138		transaction: &mut Transaction<DbHash>,
2139		displaced: &DisplacedLeavesAfterFinalization<Block>,
2140	) -> ClientResult<()> {
2141		// Discard all blocks from displaced branches
2142		for &hash in displaced.displaced_blocks.iter() {
2143			self.blockchain.insert_persisted_body_if_pinned(hash)?;
2144			self.prune_block(transaction, BlockId::<Block>::hash(hash))?;
2145		}
2146		Ok(())
2147	}
2148
2149	fn prune_block(
2150		&self,
2151		transaction: &mut Transaction<DbHash>,
2152		id: BlockId<Block>,
2153	) -> ClientResult<()> {
2154		debug!(target: "db", "Removing block #{id}");
2155		utils::remove_from_db(
2156			transaction,
2157			&*self.storage.db,
2158			columns::KEY_LOOKUP,
2159			columns::BODY,
2160			id,
2161		)?;
2162		utils::remove_from_db(
2163			transaction,
2164			&*self.storage.db,
2165			columns::KEY_LOOKUP,
2166			columns::JUSTIFICATIONS,
2167			id,
2168		)?;
2169		if let Some(index) =
2170			read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)?
2171		{
2172			utils::remove_from_db(
2173				transaction,
2174				&*self.storage.db,
2175				columns::KEY_LOOKUP,
2176				columns::BODY_INDEX,
2177				id,
2178			)?;
2179			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
2180				Ok(index) => {
2181					for ex in index {
2182						match ex {
2183							DbExtrinsic::Indexed { hash, .. } => {
2184								transaction.release(columns::TRANSACTION, hash);
2185							},
2186							DbExtrinsic::MultiRenew { hashes, .. } => {
2187								for hash in hashes {
2188									transaction.release(columns::TRANSACTION, hash);
2189								}
2190							},
2191							DbExtrinsic::Full(_) => {},
2192						}
2193					}
2194				},
2195				Err(err) => {
2196					return Err(sp_blockchain::Error::Backend(format!(
2197						"Error decoding body list: {err}",
2198					)))
2199				},
2200			}
2201		}
2202		Ok(())
2203	}
2204
2205	fn empty_state(&self) -> RecordStatsState<RefTrackingState<Block>, Block> {
2206		let root = EmptyStorage::<Block>::new().0; // Empty trie
2207		let db_state = DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
2208			.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache_untrusted()))
2209			.build();
2210		let state = RefTrackingState::new(db_state, self.storage.clone(), None);
2211		RecordStatsState::new(state, None, self.state_usage.clone())
2212	}
2213}
2214
2215fn apply_state_commit(
2216	transaction: &mut Transaction<DbHash>,
2217	commit: sc_state_db::CommitSet<Vec<u8>>,
2218) {
2219	for (key, val) in commit.data.inserted.into_iter() {
2220		transaction.set_from_vec(columns::STATE, &key[..], val);
2221	}
2222	for key in commit.data.deleted.into_iter() {
2223		transaction.remove(columns::STATE, &key[..]);
2224	}
2225	for (key, val) in commit.meta.inserted.into_iter() {
2226		transaction.set_from_vec(columns::STATE_META, &key[..], val);
2227	}
2228	for key in commit.meta.deleted.into_iter() {
2229		transaction.remove(columns::STATE_META, &key[..]);
2230	}
2231}
2232
/// Build the per-block extrinsic index from `body`, applying the requested
/// transaction-index operations (`Insert` stores the indexed tail of an extrinsic,
/// `Renew` bumps the reference count of previously stored data), and queue the
/// resulting transaction-store writes on `transaction`.
///
/// Returns the SCALE-encoded `Vec<DbExtrinsic>` to be stored in the `BODY_INDEX`
/// column; `prune_block` later decodes it to release the matching references.
fn apply_index_ops<Block: BlockT>(
	transaction: &mut Transaction<DbHash>,
	body: Vec<Block::Extrinsic>,
	ops: Vec<IndexOperation>,
) -> Vec<u8> {
	let mut extrinsic_index: Vec<DbExtrinsic<Block>> = Vec::with_capacity(body.len());
	// Maps extrinsic position -> (indexed hash, indexed size) for `Insert` ops.
	let mut index_map = HashMap::new();
	// Submission order matters; see `DbExtrinsic::MultiRenew`. Duplicates are kept so
	// per-occurrence refcount inc/dec stays symmetric with prune-time release.
	let mut renewed_map: HashMap<u32, Vec<DbHash>> = HashMap::new();
	for op in ops {
		match op {
			IndexOperation::Insert { extrinsic, hash, size } => {
				index_map.insert(extrinsic, (hash, size));
			},
			IndexOperation::Renew { extrinsic, hash } => {
				renewed_map
					.entry(extrinsic)
					.or_default()
					.push(DbHash::from_slice(hash.as_ref()));
			},
		}
	}
	// Counters for the summary debug log emitted below.
	let mut n_inserted = 0usize;
	let mut n_renew_slots = 0usize;
	let mut n_renew_hashes = 0usize;
	let mut n_full = 0usize;
	for (index, extrinsic) in body.into_iter().enumerate() {
		let db_extrinsic = if let Some(hashes) = renewed_map.remove(&(index as u32)) {
			n_renew_slots += 1;
			n_renew_hashes += hashes.len();
			let encoded = extrinsic.encode();
			if hashes.len() == 1 {
				// Single renewal: backwards-compatible Indexed variant
				let hash = hashes[0];
				transaction.reference(columns::TRANSACTION, hash);
				DbExtrinsic::Indexed { hash, header: encoded }
			} else {
				// Multi-renewal: bump ref counter for each hash
				for hash in &hashes {
					transaction.reference(columns::TRANSACTION, *hash);
				}
				DbExtrinsic::MultiRenew { hashes, extrinsic: encoded }
			}
		} else {
			match index_map.get(&(index as u32)) {
				Some((hash, size)) => {
					let encoded = extrinsic.encode();
					if *size as usize <= encoded.len() {
						n_inserted += 1;
						// Split the encoded extrinsic: the trailing `size` bytes go into the
						// content-addressed transaction store, the head stays in the index.
						let offset = encoded.len() - *size as usize;
						transaction.store(
							columns::TRANSACTION,
							DbHash::from_slice(hash.as_ref()),
							encoded[offset..].to_vec(),
						);
						DbExtrinsic::Indexed {
							hash: DbHash::from_slice(hash.as_ref()),
							header: encoded[..offset].to_vec(),
						}
					} else {
						// Invalid indexed slice. Just store full data and don't index anything.
						n_full += 1;
						DbExtrinsic::Full(extrinsic)
					}
				},
				_ => {
					n_full += 1;
					DbExtrinsic::Full(extrinsic)
				},
			}
		};
		extrinsic_index.push(db_extrinsic);
	}
	debug!(
		target: "db",
		"DB transaction index: {} inserted, {} slots renewed ({} hashes), {} full",
		n_inserted,
		n_renew_slots,
		n_renew_hashes,
		n_full,
	);
	extrinsic_index.encode()
}
2317
2318fn apply_indexed_body<Block: BlockT>(transaction: &mut Transaction<DbHash>, body: Vec<Vec<u8>>) {
2319	for extrinsic in body {
2320		let hash = sp_runtime::traits::BlakeTwo256::hash(&extrinsic);
2321		transaction.store(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()), extrinsic);
2322	}
2323}
2324
2325impl<Block> sc_client_api::backend::AuxStore for Backend<Block>
2326where
2327	Block: BlockT,
2328{
2329	fn insert_aux<
2330		'a,
2331		'b: 'a,
2332		'c: 'a,
2333		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
2334		D: IntoIterator<Item = &'a &'b [u8]>,
2335	>(
2336		&self,
2337		insert: I,
2338		delete: D,
2339	) -> ClientResult<()> {
2340		let mut transaction = Transaction::new();
2341		for (k, v) in insert {
2342			transaction.set(columns::AUX, k, v);
2343		}
2344		for k in delete {
2345			transaction.remove(columns::AUX, k);
2346		}
2347		self.storage.db.commit(transaction)?;
2348		Ok(())
2349	}
2350
2351	fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
2352		Ok(self.storage.db.get(columns::AUX, key))
2353	}
2354}
2355
2356impl<Block: BlockT> sc_client_api::backend::Backend<Block> for Backend<Block> {
2357	type BlockImportOperation = BlockImportOperation<Block>;
2358	type Blockchain = BlockchainDb<Block>;
2359	type State = RecordStatsState<RefTrackingState<Block>, Block>;
2360	type OffchainStorage = offchain::LocalStorage;
2361
2362	fn begin_operation(&self) -> ClientResult<Self::BlockImportOperation> {
2363		Ok(BlockImportOperation {
2364			pending_block: None,
2365			old_state: self.empty_state(),
2366			db_updates: PrefixedMemoryDB::default(),
2367			storage_updates: Default::default(),
2368			child_storage_updates: Default::default(),
2369			offchain_storage_updates: Default::default(),
2370			aux_ops: Vec::new(),
2371			finalized_blocks: Vec::new(),
2372			set_head: None,
2373			commit_state: false,
2374			create_gap: true,
2375			reset_storage: false,
2376			index_ops: Default::default(),
2377		})
2378	}
2379
2380	fn begin_state_operation(
2381		&self,
2382		operation: &mut Self::BlockImportOperation,
2383		block: Block::Hash,
2384	) -> ClientResult<()> {
2385		if block == Default::default() {
2386			operation.old_state = self.empty_state();
2387		} else {
2388			operation.old_state = self.state_at(block, TrieCacheContext::Untrusted)?;
2389		}
2390
2391		operation.commit_state = true;
2392		Ok(())
2393	}
2394
2395	fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> {
2396		let usage = operation.old_state.usage_info();
2397		self.state_usage.merge_sm(usage);
2398
2399		if let Err(e) = self.try_commit_operation(operation) {
2400			let state_meta_db = StateMetaDb(self.storage.db.clone());
2401			self.storage
2402				.state_db
2403				.reset(state_meta_db)
2404				.map_err(sp_blockchain::Error::from_state_db)?;
2405			self.blockchain.clear_pinning_cache();
2406			Err(e)
2407		} else {
2408			self.storage.state_db.sync();
2409			Ok(())
2410		}
2411	}
2412
2413	fn finalize_block(
2414		&self,
2415		hash: Block::Hash,
2416		justification: Option<Justification>,
2417	) -> ClientResult<()> {
2418		let mut transaction = Transaction::new();
2419		let header = self.blockchain.expect_header(hash)?;
2420
2421		let mut current_transaction_justifications = HashMap::new();
2422		let m = self.finalize_block_with_transaction(
2423			&mut transaction,
2424			hash,
2425			&header,
2426			None,
2427			justification,
2428			&mut current_transaction_justifications,
2429			true,
2430		)?;
2431
2432		self.storage.db.commit(transaction)?;
2433		self.blockchain.update_meta(m);
2434		Ok(())
2435	}
2436
2437	fn append_justification(
2438		&self,
2439		hash: Block::Hash,
2440		justification: Justification,
2441	) -> ClientResult<()> {
2442		let mut transaction: Transaction<DbHash> = Transaction::new();
2443		let header = self.blockchain.expect_header(hash)?;
2444		let number = *header.number();
2445
2446		// Check if the block is finalized first.
2447		let is_descendent_of = is_descendent_of(&self.blockchain, None);
2448		let last_finalized = self.blockchain.last_finalized()?;
2449
2450		// We can do a quick check first, before doing a proper but more expensive check
2451		if number > self.blockchain.info().finalized_number ||
2452			(hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?)
2453		{
2454			return Err(ClientError::NotInFinalizedChain);
2455		}
2456
2457		let justifications = if let Some(mut stored_justifications) =
2458			self.blockchain.justifications(hash)?
2459		{
2460			if !stored_justifications.append(justification) {
2461				return Err(ClientError::BadJustification("Duplicate consensus engine ID".into()));
2462			}
2463			stored_justifications
2464		} else {
2465			Justifications::from(justification)
2466		};
2467
2468		transaction.set_from_vec(
2469			columns::JUSTIFICATIONS,
2470			&utils::number_and_hash_to_lookup_key(number, hash)?,
2471			justifications.encode(),
2472		);
2473
2474		self.storage.db.commit(transaction)?;
2475
2476		Ok(())
2477	}
2478
2479	fn offchain_storage(&self) -> Option<Self::OffchainStorage> {
2480		Some(self.offchain_storage.clone())
2481	}
2482
2483	fn usage_info(&self) -> Option<UsageInfo> {
2484		let (io_stats, state_stats) = self.io_stats.take_or_else(|| {
2485			(
2486				// TODO: implement DB stats and cache size retrieval
2487				kvdb::IoStats::empty(),
2488				self.state_usage.take(),
2489			)
2490		});
2491		let database_cache = MemorySize::from_bytes(0);
2492		let state_cache = MemorySize::from_bytes(
2493			self.shared_trie_cache.as_ref().map_or(0, |c| c.used_memory_size()),
2494		);
2495
2496		Some(UsageInfo {
2497			memory: MemoryInfo { state_cache, database_cache },
2498			io: IoInfo {
2499				transactions: io_stats.transactions,
2500				bytes_read: io_stats.bytes_read,
2501				bytes_written: io_stats.bytes_written,
2502				writes: io_stats.writes,
2503				reads: io_stats.reads,
2504				average_transaction_size: io_stats.avg_transaction_size() as u64,
2505				state_reads: state_stats.reads.ops,
2506				state_writes: state_stats.writes.ops,
2507				state_writes_cache: state_stats.overlay_writes.ops,
2508				state_reads_cache: state_stats.cache_reads.ops,
2509				state_writes_nodes: state_stats.nodes_writes.ops,
2510			},
2511		})
2512	}
2513
2514	fn revert(
2515		&self,
2516		n: NumberFor<Block>,
2517		revert_finalized: bool,
2518	) -> ClientResult<(NumberFor<Block>, HashSet<Block::Hash>)> {
2519		let mut reverted_finalized = HashSet::new();
2520
2521		let info = self.blockchain.info();
2522
2523		let highest_leaf = self
2524			.blockchain
2525			.leaves
2526			.read()
2527			.highest_leaf()
2528			.and_then(|(n, h)| h.last().map(|h| (n, *h)));
2529
2530		let best_number = info.best_number;
2531		let best_hash = info.best_hash;
2532
2533		let finalized = info.finalized_number;
2534
2535		let revertible = best_number - finalized;
2536		let n = if !revert_finalized && revertible < n { revertible } else { n };
2537
2538		let (n, mut number_to_revert, mut hash_to_revert) = match highest_leaf {
2539			Some((l_n, l_h)) => (n + (l_n - best_number), l_n, l_h),
2540			None => (n, best_number, best_hash),
2541		};
2542
2543		let mut revert_blocks = || -> ClientResult<NumberFor<Block>> {
2544			for c in 0..n.saturated_into::<u64>() {
2545				if number_to_revert.is_zero() {
2546					return Ok(c.saturated_into::<NumberFor<Block>>());
2547				}
2548				let mut transaction = Transaction::new();
2549				let removed = self.blockchain.header(hash_to_revert)?.ok_or_else(|| {
2550					sp_blockchain::Error::UnknownBlock(format!(
2551						"Error reverting to {hash_to_revert}. Block header not found.",
2552					))
2553				})?;
2554				let removed_hash = hash_to_revert;
2555
2556				let prev_number = number_to_revert.saturating_sub(One::one());
2557				let prev_hash =
2558					if prev_number == best_number { best_hash } else { *removed.parent_hash() };
2559
2560				if !self.have_state_at(prev_hash, prev_number) {
2561					return Ok(c.saturated_into::<NumberFor<Block>>());
2562				}
2563
2564				match self.storage.state_db.revert_one() {
2565					Some(commit) => {
2566						apply_state_commit(&mut transaction, commit);
2567
2568						number_to_revert = prev_number;
2569						hash_to_revert = prev_hash;
2570
2571						let update_finalized = number_to_revert < finalized;
2572
2573						let key = utils::number_and_hash_to_lookup_key(
2574							number_to_revert,
2575							&hash_to_revert,
2576						)?;
2577						if update_finalized {
2578							transaction.set_from_vec(
2579								columns::META,
2580								meta_keys::FINALIZED_BLOCK,
2581								key.clone(),
2582							);
2583
2584							reverted_finalized.insert(removed_hash);
2585							if let Some((hash, _)) = self.blockchain.info().finalized_state {
2586								if hash == hash_to_revert {
2587									if !number_to_revert.is_zero() &&
2588										self.have_state_at(prev_hash, prev_number)
2589									{
2590										let lookup_key = utils::number_and_hash_to_lookup_key(
2591											prev_number,
2592											prev_hash,
2593										)?;
2594										transaction.set_from_vec(
2595											columns::META,
2596											meta_keys::FINALIZED_STATE,
2597											lookup_key,
2598										);
2599									} else {
2600										transaction
2601											.remove(columns::META, meta_keys::FINALIZED_STATE);
2602									}
2603								}
2604							}
2605						}
2606
2607						transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key);
2608						transaction.remove(columns::KEY_LOOKUP, removed_hash.as_ref());
2609						children::remove_children(
2610							&mut transaction,
2611							columns::META,
2612							meta_keys::CHILDREN_PREFIX,
2613							hash_to_revert,
2614						);
2615						self.prune_block(&mut transaction, BlockId::Hash(removed_hash))?;
2616						remove_from_db::<Block>(
2617							&mut transaction,
2618							&*self.storage.db,
2619							columns::KEY_LOOKUP,
2620							columns::HEADER,
2621							BlockId::Hash(removed_hash),
2622						)?;
2623
2624						self.storage.db.commit(transaction)?;
2625
2626						// Clean the cache
2627						self.blockchain.remove_header_metadata(removed_hash);
2628
2629						let is_best = number_to_revert < best_number;
2630
2631						self.blockchain.update_meta(MetaUpdate {
2632							hash: hash_to_revert,
2633							number: number_to_revert,
2634							is_best,
2635							is_finalized: update_finalized,
2636							with_state: false,
2637						});
2638					},
2639					None => return Ok(c.saturated_into::<NumberFor<Block>>()),
2640				}
2641			}
2642
2643			Ok(n)
2644		};
2645
2646		let reverted = revert_blocks()?;
2647
2648		let revert_leaves = || -> ClientResult<()> {
2649			let mut transaction = Transaction::new();
2650			let mut leaves = self.blockchain.leaves.write();
2651
2652			leaves.revert(hash_to_revert, number_to_revert).into_iter().try_for_each(
2653				|(h, _)| {
2654					self.blockchain.remove_header_metadata(h);
2655					transaction.remove(columns::KEY_LOOKUP, h.as_ref());
2656
2657					self.prune_block(&mut transaction, BlockId::Hash(h))?;
2658					remove_from_db::<Block>(
2659						&mut transaction,
2660						&*self.storage.db,
2661						columns::KEY_LOOKUP,
2662						columns::HEADER,
2663						BlockId::Hash(h),
2664					)?;
2665
2666					Ok::<_, ClientError>(())
2667				},
2668			)?;
2669			leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
2670			self.storage.db.commit(transaction)?;
2671
2672			Ok(())
2673		};
2674
2675		revert_leaves()?;
2676
2677		Ok((reverted, reverted_finalized))
2678	}
2679
2680	fn remove_leaf_block(&self, hash: Block::Hash) -> ClientResult<()> {
2681		let best_hash = self.blockchain.info().best_hash;
2682
2683		if best_hash == hash {
2684			return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {hash:?}")));
2685		}
2686
2687		let hdr = self.blockchain.header_metadata(hash)?;
2688		if !self.have_state_at(hash, hdr.number) {
2689			return Err(sp_blockchain::Error::UnknownBlock(format!(
2690				"State already discarded for {hash:?}",
2691			)));
2692		}
2693
2694		let mut leaves = self.blockchain.leaves.write();
2695		if !leaves.contains(hdr.number, hash) {
2696			return Err(sp_blockchain::Error::Backend(format!(
2697				"Can't remove non-leaf block {hash:?}",
2698			)));
2699		}
2700
2701		let mut transaction = Transaction::new();
2702		if let Some(commit) = self.storage.state_db.remove(&hash) {
2703			apply_state_commit(&mut transaction, commit);
2704		}
2705		transaction.remove(columns::KEY_LOOKUP, hash.as_ref());
2706
2707		let children: Vec<_> = self
2708			.blockchain()
2709			.children(hdr.parent)?
2710			.into_iter()
2711			.filter(|child_hash| *child_hash != hash)
2712			.collect();
2713		let parent_leaf = if children.is_empty() {
2714			children::remove_children(
2715				&mut transaction,
2716				columns::META,
2717				meta_keys::CHILDREN_PREFIX,
2718				hdr.parent,
2719			);
2720			Some(hdr.parent)
2721		} else {
2722			children::write_children(
2723				&mut transaction,
2724				columns::META,
2725				meta_keys::CHILDREN_PREFIX,
2726				hdr.parent,
2727				children,
2728			);
2729			None
2730		};
2731
2732		let remove_outcome = leaves.remove(hash, hdr.number, parent_leaf);
2733		leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
2734		if let Err(e) = self.storage.db.commit(transaction) {
2735			if let Some(outcome) = remove_outcome {
2736				leaves.undo().undo_remove(outcome);
2737			}
2738			return Err(e.into());
2739		}
2740		self.blockchain().remove_header_metadata(hash);
2741		Ok(())
2742	}
2743
2744	fn blockchain(&self) -> &BlockchainDb<Block> {
2745		&self.blockchain
2746	}
2747
2748	fn state_at(
2749		&self,
2750		hash: Block::Hash,
2751		trie_cache_context: TrieCacheContext,
2752	) -> ClientResult<Self::State> {
2753		if hash == self.blockchain.meta.read().genesis_hash {
2754			if let Some(genesis_state) = &*self.genesis_state.read() {
2755				let root = genesis_state.root;
2756				let db_state =
2757					DbStateBuilder::<HashingFor<Block>>::new(genesis_state.clone(), root)
2758						.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| {
2759							if matches!(trie_cache_context, TrieCacheContext::Trusted) {
2760								c.local_cache_trusted()
2761							} else {
2762								c.local_cache_untrusted()
2763							}
2764						}))
2765						.build();
2766
2767				let state = RefTrackingState::new(db_state, self.storage.clone(), None);
2768				return Ok(RecordStatsState::new(state, None, self.state_usage.clone()));
2769			}
2770		}
2771
2772		match self.blockchain.header_metadata(hash) {
2773			Ok(ref hdr) => {
2774				let hint = || {
2775					sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref())
2776						.unwrap_or(None)
2777						.is_some()
2778				};
2779
2780				if let Ok(()) =
2781					self.storage.state_db.pin(&hash, hdr.number.saturated_into::<u64>(), hint)
2782				{
2783					let root = hdr.state_root;
2784					let db_state =
2785						DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
2786							.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| {
2787								if matches!(trie_cache_context, TrieCacheContext::Trusted) {
2788									c.local_cache_trusted()
2789								} else {
2790									c.local_cache_untrusted()
2791								}
2792							}))
2793							.build();
2794					let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash));
2795					Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone()))
2796				} else {
2797					Err(sp_blockchain::Error::UnknownBlock(format!(
2798						"State already discarded for {hash:?}",
2799					)))
2800				}
2801			},
2802			Err(e) => Err(e),
2803		}
2804	}
2805
2806	fn have_state_at(&self, hash: Block::Hash, number: NumberFor<Block>) -> bool {
2807		if self.is_archive {
2808			match self.blockchain.header_metadata(hash) {
2809				Ok(header) => sp_state_machine::Storage::get(
2810					self.storage.as_ref(),
2811					&header.state_root,
2812					(&[], None),
2813				)
2814				.unwrap_or(None)
2815				.is_some(),
2816				_ => false,
2817			}
2818		} else {
2819			match self.storage.state_db.is_pruned(&hash, number.saturated_into::<u64>()) {
2820				IsPruned::Pruned => false,
2821				IsPruned::NotPruned => true,
2822				IsPruned::MaybePruned => match self.blockchain.header_metadata(hash) {
2823					Ok(header) => sp_state_machine::Storage::get(
2824						self.storage.as_ref(),
2825						&header.state_root,
2826						(&[], None),
2827					)
2828					.unwrap_or(None)
2829					.is_some(),
2830					_ => false,
2831				},
2832			}
2833		}
2834	}
2835
	// Accessor for the lock that serializes block import with other backend
	// operations (e.g. revert); callers hold it for the duration of an import.
	fn get_import_lock(&self) -> &RwLock<()> {
		&self.import_lock
	}
2839
2840	fn requires_full_sync(&self) -> bool {
2841		matches!(
2842			self.storage.state_db.pruning_mode(),
2843			PruningMode::ArchiveAll | PruningMode::ArchiveCanonical
2844		)
2845	}
2846
2847	fn pin_block(&self, hash: <Block as BlockT>::Hash) -> sp_blockchain::Result<()> {
2848		let hint = || {
2849			let header_metadata = self.blockchain.header_metadata(hash);
2850			header_metadata
2851				.map(|hdr| {
2852					sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref())
2853						.unwrap_or(None)
2854						.is_some()
2855				})
2856				.unwrap_or(false)
2857		};
2858
2859		if let Some(number) = self.blockchain.number(hash)? {
2860			self.storage.state_db.pin(&hash, number.saturated_into::<u64>(), hint).map_err(
2861				|_| {
2862					sp_blockchain::Error::UnknownBlock(format!(
2863						"Unable to pin: state already discarded for `{hash:?}`",
2864					))
2865				},
2866			)?;
2867		} else {
2868			return Err(ClientError::UnknownBlock(format!(
2869				"Can not pin block with hash `{hash:?}`. Block not found.",
2870			)));
2871		}
2872
2873		if self.blocks_pruning != BlocksPruning::KeepAll {
2874			// Only increase reference count for this hash. Value is loaded once we prune.
2875			self.blockchain.bump_ref(hash);
2876		}
2877		Ok(())
2878	}
2879
2880	fn unpin_block(&self, hash: <Block as BlockT>::Hash) {
2881		self.storage.state_db.unpin(&hash);
2882
2883		if self.blocks_pruning != BlocksPruning::KeepAll {
2884			self.blockchain.unpin(hash);
2885		}
2886	}
2887}
2888
// Marker impl: the database-backed `Backend` is a local (non-remote) backend.
impl<Block: BlockT> sc_client_api::backend::LocalBackend<Block> for Backend<Block> {}
2890
2891#[cfg(test)]
2892pub(crate) mod tests {
2893	use super::*;
2894	use crate::{columns, utils::number_and_hash_to_lookup_key};
2895	use hash_db::{HashDB, EMPTY_PREFIX};
2896	use sc_client_api::{
2897		backend::{Backend as BTrait, BlockImportOperation as Op},
2898		blockchain::Backend as BLBTrait,
2899	};
2900	use sp_blockchain::{lowest_common_ancestor, tree_route};
2901	use sp_core::H256;
2902	use sp_runtime::{
2903		testing::{Block as RawBlock, Header, MockCallU64, TestXt},
2904		traits::{BlakeTwo256, Hash},
2905		ConsensusEngineId, StateVersion,
2906	};
2907
	// Two distinct consensus engine ids used by tests in this module.
	const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0";
	const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1";

	// Minimal extrinsic and block types used throughout this test suite.
	type UncheckedXt = TestXt<MockCallU64, ()>;
	pub(crate) type Block = RawBlock<UncheckedXt>;
2913
2914	pub fn insert_header(
2915		backend: &Backend<Block>,
2916		number: u64,
2917		parent_hash: H256,
2918		changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
2919		extrinsics_root: H256,
2920	) -> H256 {
2921		insert_block(backend, number, parent_hash, changes, extrinsics_root, Vec::new(), None)
2922			.unwrap()
2923	}
2924
2925	pub fn insert_block(
2926		backend: &Backend<Block>,
2927		number: u64,
2928		parent_hash: H256,
2929		_changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
2930		extrinsics_root: H256,
2931		body: Vec<UncheckedXt>,
2932		transaction_index: Option<Vec<IndexOperation>>,
2933	) -> Result<H256, sp_blockchain::Error> {
2934		use sp_runtime::testing::Digest;
2935
2936		let digest = Digest::default();
2937		let mut header =
2938			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2939
2940		let block_hash = if number == 0 { Default::default() } else { parent_hash };
2941		let mut op = backend.begin_operation().unwrap();
2942		backend.begin_state_operation(&mut op, block_hash).unwrap();
2943		if let Some(index) = transaction_index {
2944			op.update_transaction_index(index).unwrap();
2945		}
2946
2947		// Insert some fake data to ensure that the block can be found in the state column.
2948		let (root, overlay) = op.old_state.storage_root(
2949			vec![(block_hash.as_ref(), Some(block_hash.as_ref()))].into_iter(),
2950			StateVersion::V1,
2951		);
2952		op.update_db_storage(overlay).unwrap();
2953		header.state_root = root.into();
2954
2955		op.set_block_data(header.clone(), Some(body), None, None, NewBlockState::Best, true)
2956			.unwrap();
2957
2958		backend.commit_operation(op)?;
2959
2960		Ok(header.hash())
2961	}
2962
2963	pub fn insert_disconnected_header(
2964		backend: &Backend<Block>,
2965		number: u64,
2966		parent_hash: H256,
2967		extrinsics_root: H256,
2968		best: bool,
2969	) -> H256 {
2970		use sp_runtime::testing::Digest;
2971
2972		let digest = Digest::default();
2973		let header =
2974			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2975
2976		let mut op = backend.begin_operation().unwrap();
2977
2978		op.set_block_data(
2979			header.clone(),
2980			Some(vec![]),
2981			None,
2982			None,
2983			if best { NewBlockState::Best } else { NewBlockState::Normal },
2984			true,
2985		)
2986		.unwrap();
2987
2988		backend.commit_operation(op).unwrap();
2989
2990		header.hash()
2991	}
2992
2993	pub fn insert_header_no_head(
2994		backend: &Backend<Block>,
2995		number: u64,
2996		parent_hash: H256,
2997		extrinsics_root: H256,
2998	) -> H256 {
2999		use sp_runtime::testing::Digest;
3000
3001		let digest = Digest::default();
3002		let mut header =
3003			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
3004		let mut op = backend.begin_operation().unwrap();
3005
3006		let root = backend
3007			.state_at(parent_hash, TrieCacheContext::Untrusted)
3008			.unwrap_or_else(|_| {
3009				if parent_hash == Default::default() {
3010					backend.empty_state()
3011				} else {
3012					panic!("Unknown block: {parent_hash:?}")
3013				}
3014			})
3015			.storage_root(
3016				vec![(parent_hash.as_ref(), Some(parent_hash.as_ref()))].into_iter(),
3017				StateVersion::V1,
3018			)
3019			.0;
3020		header.state_root = root.into();
3021
3022		op.set_block_data(header.clone(), None, None, None, NewBlockState::Normal, true)
3023			.unwrap();
3024		backend.commit_operation(op).unwrap();
3025
3026		header.hash()
3027	}
3028
	#[test]
	fn block_hash_inserted_correctly() {
		// Build a 10-block chain in one backend, then re-open the same raw database
		// with a different pruning configuration and verify every block hash is
		// still retrievable.
		let backing = {
			let db = Backend::<Block>::new_test(1, 0);
			for i in 0..10 {
				assert!(db.blockchain().hash(i).unwrap().is_none());

				{
					// Genesis builds on the zero hash, every other block on its parent.
					let hash = if i == 0 {
						Default::default()
					} else {
						db.blockchain.hash(i - 1).unwrap().unwrap()
					};

					let mut op = db.begin_operation().unwrap();
					db.begin_state_operation(&mut op, hash).unwrap();
					let header = Header {
						number: i,
						parent_hash: hash,
						state_root: Default::default(),
						digest: Default::default(),
						extrinsics_root: Default::default(),
					};

					op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
						.unwrap();
					db.commit_operation(op).unwrap();
				}

				assert!(db.blockchain().hash(i).unwrap().is_some())
			}
			// Keep only the raw database handle; the first backend is dropped here.
			db.storage.db.clone()
		};

		// Re-open the same database through a fresh backend.
		let backend = Backend::<Block>::new(
			DatabaseSettings {
				trie_cache_maximum_size: Some(16 * 1024 * 1024),
				state_pruning: Some(PruningMode::blocks_pruning(1)),
				source: DatabaseSource::Custom { db: backing, require_create_flag: false },
				blocks_pruning: BlocksPruning::KeepFinalized,
				pruning_filters: Default::default(),
				metrics_registry: None,
			},
			0,
		)
		.unwrap();
		// Best block and all historical hashes must survive the re-open.
		assert_eq!(backend.blockchain().info().best_number, 9);
		for i in 0..10 {
			assert!(backend.blockchain().hash(i).unwrap().is_some())
		}
	}
3080
3081	#[test]
3082	fn set_state_data() {
3083		set_state_data_inner(StateVersion::V0);
3084		set_state_data_inner(StateVersion::V1);
3085	}
	// Inserts a genesis block carrying two storage entries, then a child block that
	// deletes one key and adds another, checking state reads after each commit.
	fn set_state_data_inner(state_version: StateVersion) {
		let db = Backend::<Block>::new_test(2, 0);
		let hash = {
			let mut op = db.begin_operation().unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(vec![1, 3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9])];

			// The state root must be computed before the header hash is taken.
			header.state_root = op
				.old_state
				.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.reset_storage(
				Storage {
					top: storage.into_iter().collect(),
					children_default: Default::default(),
				},
				state_version,
			)
			.unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			db.commit_operation(op).unwrap();

			let state = db.state_at(hash, TrieCacheContext::Untrusted).unwrap();

			// Both inserted keys must be readable; an unknown key must not be.
			assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6]));
			assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
			assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None);

			hash
		};

		{
			let mut op = db.begin_operation().unwrap();
			db.begin_state_operation(&mut op, hash).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			// Delete [1, 3, 5] and insert [5, 5, 5].
			let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];

			let (root, overlay) = op.old_state.storage_root(
				storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
				state_version,
			);
			op.update_db_storage(overlay).unwrap();
			header.state_root = root.into();

			op.update_storage(storage, Vec::new()).unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			db.commit_operation(op).unwrap();

			let state = db.state_at(header.hash(), TrieCacheContext::Untrusted).unwrap();

			// Deletion and insertion are visible; the untouched key is unchanged.
			assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None);
			assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
			assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6]));
		}
	}
3162
	#[test]
	fn delete_only_when_negative_rc() {
		// Verifies reference counting of trie nodes: a node inserted more than once
		// must survive until its reference count drops and the owning block is
		// pruned (backend keeps a pruning window of 1 state here).
		sp_tracing::try_init_simple();
		let state_version = StateVersion::default();
		let key;
		let backend = Backend::<Block>::new_test(1, 0);

		// Block 0: insert the b"hello" node once (rc = 1).
		let hash = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, Default::default()).unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			header.state_root =
				op.old_state.storage_root(std::iter::empty(), state_version).0.into();
			let hash = header.hash();

			op.reset_storage(
				Storage { top: Default::default(), children_default: Default::default() },
				state_version,
			)
			.unwrap();

			key = op.db_updates.insert(EMPTY_PREFIX, b"hello");
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			// The node must be present in the state column after the commit.
			assert_eq!(
				backend
					.storage
					.db
					.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
					.unwrap(),
				&b"hello"[..]
			);
			hash
		};

		// Block 1: insert the same value again and remove it once — the reference
		// count stays positive, so the node must remain readable.
		let hashof1 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hash).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.db_updates.insert(EMPTY_PREFIX, b"hello");
			op.db_updates.remove(&key, EMPTY_PREFIX);
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			assert_eq!(
				backend
					.storage
					.db
					.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
					.unwrap(),
				&b"hello"[..]
			);
			hash
		};

		// Block 2: remove the node again — but the deletion only takes effect once
		// the block leaves the pruning window, so it is still present here.
		let hashof2 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof1).unwrap();
			let mut header = Header {
				number: 2,
				parent_hash: hashof1,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.db_updates.remove(&key, EMPTY_PREFIX);
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			assert!(backend
				.storage
				.db
				.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
				.is_some());
			hash
		};

		// Block 3: empty block on top; node still within the pruning window.
		let hashof3 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof2).unwrap();
			let mut header = Header {
				number: 3,
				parent_hash: hashof2,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			hash
		};

		// Block 4: another empty block; by now the deletion has been applied and
		// the node must be gone from the state column.
		let hashof4 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof3).unwrap();
			let mut header = Header {
				number: 4,
				parent_hash: hashof3,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			assert!(backend
				.storage
				.db
				.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
				.is_none());
			hash
		};

		// Finalizing all blocks must not resurrect the deleted node.
		backend.finalize_block(hashof1, None).unwrap();
		backend.finalize_block(hashof2, None).unwrap();
		backend.finalize_block(hashof3, None).unwrap();
		backend.finalize_block(hashof4, None).unwrap();
		assert!(backend
			.storage
			.db
			.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
			.is_none());
	}
3347
	#[test]
	fn tree_route_works() {
		// Builds two forks off genesis and checks the retracted/enacted sets of
		// tree routes in every direction, including the trivial self-route.
		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();
		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());

		// fork from genesis: 3 prong.
		let a1 = insert_header(&backend, 1, block0, None, Default::default());
		let a2 = insert_header(&backend, 2, a1, None, Default::default());
		let a3 = insert_header(&backend, 3, a2, None, Default::default());

		// fork from genesis: 2 prong.
		let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32]));
		let b2 = insert_header(&backend, 2, b1, None, Default::default());

		{
			// Route from a block to itself is empty.
			let tree_route = tree_route(blockchain, a1, a1).unwrap();

			assert_eq!(tree_route.common_block().hash, a1);
			assert!(tree_route.retracted().is_empty());
			assert!(tree_route.enacted().is_empty());
		}

		{
			// Route across forks: retract the a-branch, enact the b-branch.
			let tree_route = tree_route(blockchain, a3, b2).unwrap();

			assert_eq!(tree_route.common_block().hash, block0);
			assert_eq!(
				tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![a3, a2, a1]
			);
			assert_eq!(
				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![b1, b2]
			);
		}

		{
			// Route from ancestor to descendant only enacts.
			let tree_route = tree_route(blockchain, a1, a3).unwrap();

			assert_eq!(tree_route.common_block().hash, a1);
			assert!(tree_route.retracted().is_empty());
			assert_eq!(
				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![a2, a3]
			);
		}

		{
			// Route from descendant to ancestor only retracts.
			let tree_route = tree_route(blockchain, a3, a1).unwrap();

			assert_eq!(tree_route.common_block().hash, a1);
			assert_eq!(
				tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![a3, a2]
			);
			assert!(tree_route.enacted().is_empty());
		}

		{
			let tree_route = tree_route(blockchain, a2, a2).unwrap();

			assert_eq!(tree_route.common_block().hash, a2);
			assert!(tree_route.retracted().is_empty());
			assert!(tree_route.enacted().is_empty());
		}
	}
3415
3416	#[test]
3417	fn tree_route_child() {
3418		let backend = Backend::<Block>::new_test(1000, 100);
3419		let blockchain = backend.blockchain();
3420
3421		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3422		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3423
3424		{
3425			let tree_route = tree_route(blockchain, block0, block1).unwrap();
3426
3427			assert_eq!(tree_route.common_block().hash, block0);
3428			assert!(tree_route.retracted().is_empty());
3429			assert_eq!(
3430				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3431				vec![block1]
3432			);
3433		}
3434	}
3435
3436	#[test]
3437	fn lowest_common_ancestor_works() {
3438		let backend = Backend::<Block>::new_test(1000, 100);
3439		let blockchain = backend.blockchain();
3440		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3441
3442		// fork from genesis: 3 prong.
3443		let a1 = insert_header(&backend, 1, block0, None, Default::default());
3444		let a2 = insert_header(&backend, 2, a1, None, Default::default());
3445		let a3 = insert_header(&backend, 3, a2, None, Default::default());
3446
3447		// fork from genesis: 2 prong.
3448		let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32]));
3449		let b2 = insert_header(&backend, 2, b1, None, Default::default());
3450
3451		{
3452			let lca = lowest_common_ancestor(blockchain, a3, b2).unwrap();
3453
3454			assert_eq!(lca.hash, block0);
3455			assert_eq!(lca.number, 0);
3456		}
3457
3458		{
3459			let lca = lowest_common_ancestor(blockchain, a1, a3).unwrap();
3460
3461			assert_eq!(lca.hash, a1);
3462			assert_eq!(lca.number, 1);
3463		}
3464
3465		{
3466			let lca = lowest_common_ancestor(blockchain, a3, a1).unwrap();
3467
3468			assert_eq!(lca.hash, a1);
3469			assert_eq!(lca.number, 1);
3470		}
3471
3472		{
3473			let lca = lowest_common_ancestor(blockchain, a2, a3).unwrap();
3474
3475			assert_eq!(lca.hash, a2);
3476			assert_eq!(lca.number, 2);
3477		}
3478
3479		{
3480			let lca = lowest_common_ancestor(blockchain, a2, a1).unwrap();
3481
3482			assert_eq!(lca.hash, a1);
3483			assert_eq!(lca.number, 1);
3484		}
3485
3486		{
3487			let lca = lowest_common_ancestor(blockchain, a2, a2).unwrap();
3488
3489			assert_eq!(lca.hash, a2);
3490			assert_eq!(lca.number, 2);
3491		}
3492	}
3493
	#[test]
	fn displaced_leaves_after_finalizing_works_with_disconnect() {
		// In this test we will create a situation that can typically happen after warp sync.
		// The situation looks like this:
		// g -> <unimported> -> a3 -> a4
		// Basically there is a gap of unimported blocks at some point in the chain.
		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();
		let genesis_number = 0;
		let genesis_hash =
			insert_header(&backend, genesis_number, Default::default(), None, Default::default());

		// a3 is inserted with a parent hash that does not exist in the database.
		let a3_number = 3;
		let a3_hash = insert_disconnected_header(
			&backend,
			a3_number,
			H256::from([200; 32]),
			H256::from([1; 32]),
			true,
		);

		let a4_number = 4;
		let a4_hash =
			insert_disconnected_header(&backend, a4_number, a3_hash, H256::from([2; 32]), true);
		{
			// Finalizing a3 displaces the genesis leaf, but no blocks can be
			// collected across the gap.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([200; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a4_hash, a4_number, a3_hash)
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Import block a1 which has the genesis block as parent.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		let a1_number = 1;
		let a1_hash = insert_disconnected_header(
			&backend,
			a1_number,
			genesis_hash,
			H256::from([123; 32]),
			false,
		);
		{
			// a1 might connect to the finalized chain across the gap, so it must
			// not be displaced.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Import block b1 which has the genesis block as parent.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		//  \-> b1
		let b1_number = 1;
		let b1_hash = insert_disconnected_header(
			&backend,
			b1_number,
			genesis_hash,
			H256::from([124; 32]),
			false,
		);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash, b1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// If branch of b blocks is higher in number than a branch, we
		// should still not prune disconnected leafs.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		//  \-> b1 -> b2 ----------> b3 ----> b4 -> b5
		let b2_number = 2;
		let b2_hash =
			insert_disconnected_header(&backend, b2_number, b1_hash, H256::from([40; 32]), false);
		let b3_number = 3;
		let b3_hash =
			insert_disconnected_header(&backend, b3_number, b2_hash, H256::from([41; 32]), false);
		let b4_number = 4;
		let b4_hash =
			insert_disconnected_header(&backend, b4_number, b3_hash, H256::from([42; 32]), false);
		let b5_number = 5;
		let b5_hash =
			insert_disconnected_header(&backend, b5_number, b4_hash, H256::from([43; 32]), false);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Even though there is a disconnect, displace should still detect
		// branches above the block gap.
		//                              /-> c4
		// g -> a1 -> <unimported> -> a3 -> a4(f)
		//  \-> b1 -> b2 ----------> b3 -> b4 -> b5
		let c4_number = 4;
		let c4_hash =
			insert_disconnected_header(&backend, c4_number, a3_hash, H256::from([44; 32]), false);
		{
			// c4 branches off above the gap, so it can be displaced and collected.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a4_hash, a4_number, a3_hash)
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, c4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(c4_number, c4_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![c4_hash]);
		}
	}
3617
	#[test]
	fn disconnected_blocks_do_not_become_leaves_and_warp_sync_scenario() {
		// Simulate a realistic case:
		//
		// 1. Import genesis (block #0) normally — becomes a leaf.
		// 2. Import warp sync proof blocks at #5, #10, #15 without leaf registration. Their parents
		//    are NOT in the DB. They must NOT appear as leaves.
		// 3. Import block #20 as Final. Its parent (#19) is not in the DB. Being Final, it updates
		//    finalized number to 20.
		// 4. Import blocks #1..#19 with Normal state (gap sync). Since last_finalized_num is now 20
		//    and each block number < 20, the leaf condition (number > last_finalized_num ||
		//    last_finalized_num.is_zero()) is FALSE — they must NOT become leaves.
		// 5. Assert throughout and verify displaced_leaves_after_finalizing works cleanly with no
		//    disconnected proof blocks in the displaced list.

		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();

		// Low-level insert that gives full control over the block state and whether
		// the block is registered as a leaf.
		let insert_block_raw = |number: u64,
		                        parent_hash: H256,
		                        ext_root: H256,
		                        state: NewBlockState,
		                        register_as_leaf: bool|
		 -> H256 {
			use sp_runtime::testing::Digest;
			let digest = Digest::default();
			let header = Header {
				number,
				parent_hash,
				state_root: Default::default(),
				digest,
				extrinsics_root: ext_root,
			};
			let mut op = backend.begin_operation().unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, state, register_as_leaf)
				.unwrap();
			backend.commit_operation(op).unwrap();
			header.hash()
		};

		// --- Step 1: import genesis ---
		let genesis_hash = insert_header(&backend, 0, Default::default(), None, Default::default());
		assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]);

		// --- Step 2: import warp sync proof blocks without leaf registration ---
		// These simulate authority-set-change blocks from the warp sync proof.
		// Their parents are NOT in the DB.
		let _proof5_hash = insert_block_raw(
			5,
			H256::from([5; 32]),
			H256::from([50; 32]),
			NewBlockState::Normal,
			false,
		);
		let _proof10_hash = insert_block_raw(
			10,
			H256::from([10; 32]),
			H256::from([100; 32]),
			NewBlockState::Normal,
			false,
		);
		let _proof15_hash = insert_block_raw(
			15,
			H256::from([15; 32]),
			H256::from([150; 32]),
			NewBlockState::Normal,
			false,
		);

		// Leaves must still only contain genesis.
		assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]);

		// The disconnected blocks should still be retrievable from the DB.
		assert!(blockchain.header(_proof5_hash).unwrap().is_some());
		assert!(blockchain.header(_proof10_hash).unwrap().is_some());
		assert!(blockchain.header(_proof15_hash).unwrap().is_some());

		// --- Step 3: import warp sync target block #20 as Final ---
		// Parent (#19) is not in the DB. Use the same low-level approach but with
		// NewBlockState::Final. Being Final, it will be set as best + finalized.
		let block20_hash = insert_block_raw(
			20,
			H256::from([19; 32]),
			H256::from([200; 32]),
			NewBlockState::Final,
			true,
		);

		// Block #20 should now be a leaf (it's best and finalized).
		let leaves = blockchain.leaves().unwrap();
		assert!(leaves.contains(&block20_hash));
		// Verify finalized number was updated to 20.
		assert_eq!(blockchain.info().finalized_number, 20);
		assert_eq!(blockchain.info().finalized_hash, block20_hash);
		// Disconnected proof blocks must still not be leaves.
		assert!(!leaves.contains(&_proof5_hash));
		assert!(!leaves.contains(&_proof10_hash));
		assert!(!leaves.contains(&_proof15_hash));

		// --- Step 4: import gap sync blocks #1..#19 with Normal state ---
		// Since last_finalized_num is 20, each block with number < 20 should NOT
		// become a leaf (the condition `number > last_finalized_num` is false).
		// Build the chain: genesis -> #1 -> #2 -> ... -> #19.
		let mut prev_hash = genesis_hash;
		let mut gap_hashes = Vec::new();
		for n in 1..=19 {
			let h = insert_disconnected_header(&backend, n, prev_hash, Default::default(), false);
			gap_hashes.push(h);
			prev_hash = h;
		}

		// Verify gap sync blocks did NOT create new leaves.
		let leaves = blockchain.leaves().unwrap();
		for (i, gap_hash) in gap_hashes.iter().enumerate() {
			assert!(
				!leaves.contains(gap_hash),
				"Gap sync block #{} should not be a leaf, but it is",
				i + 1,
			);
		}
		// Block #20 should still be a leaf.
		assert!(leaves.contains(&block20_hash));
		// Disconnected proof blocks must still not be leaves.
		assert!(!leaves.contains(&_proof5_hash));
		assert!(!leaves.contains(&_proof10_hash));
		assert!(!leaves.contains(&_proof15_hash));

		// --- Step 5: verify displaced_leaves_after_finalizing works cleanly ---
		// Call it for block #20 to verify no disconnected proof blocks appear
		// in the displaced list and it completes without errors.
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(
					block20_hash,
					20,
					H256::from([19; 32]), // parent hash of block #20
				)
				.unwrap();
			// Disconnected proof blocks were never leaves, so they must not
			// appear in displaced_leaves.
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof5_hash),);
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof10_hash),);
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof15_hash),);
			// None of the gap sync blocks should be displaced leaves either
			// (they were never added as leaves).
			for gap_hash in &gap_hashes {
				assert!(!displaced.displaced_leaves.iter().any(|(_, h)| h == gap_hash),);
			}
		}
	}
3768
	#[test]
	fn displaced_leaves_after_finalizing_works() {
		// Exercises `displaced_leaves_after_finalizing` on a tree with three
		// competing forks, checking both the reported displaced leaves and the
		// full (sorted) set of blocks that become unreachable.
		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();
		let genesis_number = 0;
		let genesis_hash =
			insert_header(&backend, genesis_number, Default::default(), None, Default::default());

		// fork from genesis: 3 prong.
		// block 0 -> a1 -> a2 -> a3
		//        \
		//         -> b1 -> b2 -> c1 -> c2
		//              \
		//               -> d1 -> d2
		let a1_number = 1;
		let a1_hash = insert_header(&backend, a1_number, genesis_hash, None, Default::default());
		let a2_number = 2;
		let a2_hash = insert_header(&backend, a2_number, a1_hash, None, Default::default());
		let a3_number = 3;
		let a3_hash = insert_header(&backend, a3_number, a2_hash, None, Default::default());

		// Only the `a` chain exists so far, so finalizing along it displaces nothing.
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(genesis_hash, genesis_number, Default::default())
				.unwrap();
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}
		{
			let displaced_a1 = blockchain
				.displaced_leaves_after_finalizing(a1_hash, a1_number, genesis_hash)
				.unwrap();
			assert_eq!(displaced_a1.displaced_leaves, vec![]);
			assert_eq!(displaced_a1.displaced_blocks, vec![]);

			let displaced_a2 = blockchain
				.displaced_leaves_after_finalizing(a2_hash, a2_number, a1_hash)
				.unwrap();
			assert_eq!(displaced_a2.displaced_leaves, vec![]);
			assert_eq!(displaced_a2.displaced_blocks, vec![]);

			let displaced_a3 = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, a2_hash)
				.unwrap();
			assert_eq!(displaced_a3.displaced_leaves, vec![]);
			assert_eq!(displaced_a3.displaced_blocks, vec![]);
		}
		{
			// Finalized block is above leaves and not imported yet.
			// We will not be able to make a connection,
			// nothing can be marked as displaced.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(H256::from([57; 32]), 10, H256::from([56; 32]))
				.unwrap();
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// fork from genesis: 2 prong.
		let b1_number = 1;
		let b1_hash = insert_header(&backend, b1_number, genesis_hash, None, H256::from([1; 32]));
		let b2_number = 2;
		let b2_hash = insert_header(&backend, b2_number, b1_hash, None, Default::default());

		// fork from b2.
		let c1_number = 3;
		let c1_hash = insert_header(&backend, c1_number, b2_hash, None, H256::from([2; 32]));
		let c2_number = 4;
		let c2_hash = insert_header(&backend, c2_number, c1_hash, None, Default::default());

		// fork from b1.
		let d1_number = 2;
		let d1_hash = insert_header(&backend, d1_number, b1_hash, None, H256::from([3; 32]));
		let d2_number = 3;
		let d2_hash = insert_header(&backend, d2_number, d1_hash, None, Default::default());

		{
			// Finalizing any `a` block displaces the whole b/c/d sub-tree:
			// its leaves (c2, d2) and every block on those branches.
			let displaced_a1 = blockchain
				.displaced_leaves_after_finalizing(a1_hash, a1_number, genesis_hash)
				.unwrap();
			assert_eq!(
				displaced_a1.displaced_leaves,
				vec![(c2_number, c2_hash), (d2_number, d2_hash)]
			);
			let mut displaced_blocks = vec![b1_hash, b2_hash, c1_hash, c2_hash, d1_hash, d2_hash];
			displaced_blocks.sort();
			assert_eq!(displaced_a1.displaced_blocks, displaced_blocks);

			// Finalizing higher up the same chain yields the identical result.
			let displaced_a2 = blockchain
				.displaced_leaves_after_finalizing(a2_hash, a2_number, a1_hash)
				.unwrap();
			assert_eq!(displaced_a1.displaced_leaves, displaced_a2.displaced_leaves);
			assert_eq!(displaced_a1.displaced_blocks, displaced_a2.displaced_blocks);

			let displaced_a3 = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, a2_hash)
				.unwrap();
			assert_eq!(displaced_a1.displaced_leaves, displaced_a3.displaced_leaves);
			assert_eq!(displaced_a1.displaced_blocks, displaced_a3.displaced_blocks);
		}
		{
			// Finalizing b1 displaces only the `a` chain; the d fork still
			// descends from b1 and therefore survives.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(b1_hash, b1_number, genesis_hash)
				.unwrap();
			assert_eq!(displaced.displaced_leaves, vec![(a3_number, a3_hash)]);
			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash];
			displaced_blocks.sort();
			assert_eq!(displaced.displaced_blocks, displaced_blocks);
		}
		{
			// Finalizing b2 additionally displaces the d fork (rooted at b1).
			let displaced = blockchain
				.displaced_leaves_after_finalizing(b2_hash, b2_number, b1_hash)
				.unwrap();
			assert_eq!(
				displaced.displaced_leaves,
				vec![(a3_number, a3_hash), (d2_number, d2_hash)]
			);
			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash];
			displaced_blocks.sort();
			assert_eq!(displaced.displaced_blocks, displaced_blocks);
		}
		{
			// Finalizing c2 displaces the same set as b2: the a-chain plus the d fork.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(c2_hash, c2_number, c1_hash)
				.unwrap();
			assert_eq!(
				displaced.displaced_leaves,
				vec![(a3_number, a3_hash), (d2_number, d2_hash)]
			);
			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash];
			displaced_blocks.sort();
			assert_eq!(displaced.displaced_blocks, displaced_blocks);
		}
	}
3903
3904	#[test]
3905	fn test_tree_route_regression() {
3906		// NOTE: this is a test for a regression introduced in #3665, the result
3907		// of tree_route would be erroneously computed, since it was taking into
3908		// account the `ancestor` in `CachedHeaderMetadata` for the comparison.
3909		// in this test we simulate the same behavior with the side-effect
3910		// triggering the issue being eviction of a previously fetched record
3911		// from the cache, therefore this test is dependent on the LRU cache
3912		// size for header metadata, which is currently set to 5000 elements.
3913		let backend = Backend::<Block>::new_test(10000, 10000);
3914		let blockchain = backend.blockchain();
3915
3916		let genesis = insert_header(&backend, 0, Default::default(), None, Default::default());
3917
3918		let block100 = (1..=100).fold(genesis, |parent, n| {
3919			insert_header(&backend, n, parent, None, Default::default())
3920		});
3921
3922		let block7000 = (101..=7000).fold(block100, |parent, n| {
3923			insert_header(&backend, n, parent, None, Default::default())
3924		});
3925
3926		// This will cause the ancestor of `block100` to be set to `genesis` as a side-effect.
3927		lowest_common_ancestor(blockchain, genesis, block100).unwrap();
3928
3929		// While traversing the tree we will have to do 6900 calls to
3930		// `header_metadata`, which will make sure we will exhaust our cache
3931		// which only takes 5000 elements. In particular, the `CachedHeaderMetadata` struct for
3932		// block #100 will be evicted and will get a new value (with ancestor set to its parent).
3933		let tree_route = tree_route(blockchain, block100, block7000).unwrap();
3934
3935		assert!(tree_route.retracted().is_empty());
3936	}
3937
3938	#[test]
3939	fn test_leaves_with_complex_block_tree() {
3940		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3941			Arc::new(Backend::new_test(20, 20));
3942		substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend);
3943	}
3944
3945	#[test]
3946	fn test_children_with_complex_block_tree() {
3947		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3948			Arc::new(Backend::new_test(20, 20));
3949		substrate_test_runtime_client::trait_tests::test_children_for_backend(backend);
3950	}
3951
3952	#[test]
3953	fn test_blockchain_query_by_number_gets_canonical() {
3954		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3955			Arc::new(Backend::new_test(20, 20));
3956		substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(
3957			backend,
3958		);
3959	}
3960
	#[test]
	fn test_leaves_pruned_on_finality() {
		// Leaves on non-canonical branches must be removed from the leaf set
		// once finality proves their branch can never be canonicalized.
		//   / 1b - 2b - 3b
		// 0 - 1a - 2a
		//   \ 1c
		let backend: Backend<Block> = Backend::new_test(10, 10);
		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());

		let block1_a = insert_header(&backend, 1, block0, None, Default::default());
		let block1_b = insert_header(&backend, 1, block0, None, [1; 32].into());
		let block1_c = insert_header(&backend, 1, block0, None, [2; 32].into());

		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block1_a, block1_b, block1_c]);

		let block2_a = insert_header(&backend, 2, block1_a, None, Default::default());
		let block2_b = insert_header(&backend, 2, block1_b, None, Default::default());

		let block3_b = insert_header(&backend, 3, block2_b, None, [3; 32].into());

		// Tallest leaf first: 3b (height 3), then 2a, then 1c.
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block3_b, block2_a, block1_c]);

		backend.finalize_block(block1_a, None).unwrap();
		backend.finalize_block(block2_a, None).unwrap();

		// All leaves are pruned that are known to not belong to canonical branch
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
	}
3988
3989	#[test]
3990	fn test_aux() {
3991		let backend: Backend<substrate_test_runtime_client::runtime::Block> =
3992			Backend::new_test(0, 0);
3993		assert!(backend.get_aux(b"test").unwrap().is_none());
3994		backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap();
3995		assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]);
3996		backend.insert_aux(&[], &[&b"test"[..]]).unwrap();
3997		assert!(backend.get_aux(b"test").unwrap().is_none());
3998	}
3999
4000	#[test]
4001	fn test_finalize_block_with_justification() {
4002		use sc_client_api::blockchain::Backend as BlockChainBackend;
4003
4004		let backend = Backend::<Block>::new_test(10, 10);
4005
4006		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4007		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4008
4009		let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3]));
4010		backend.finalize_block(block1, justification.clone()).unwrap();
4011
4012		assert_eq!(
4013			backend.blockchain().justifications(block1).unwrap(),
4014			justification.map(Justifications::from),
4015		);
4016	}
4017
4018	#[test]
4019	fn test_append_justification_to_finalized_block() {
4020		use sc_client_api::blockchain::Backend as BlockChainBackend;
4021
4022		let backend = Backend::<Block>::new_test(10, 10);
4023
4024		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4025		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4026
4027		let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]);
4028		backend.finalize_block(block1, Some(just0.clone().into())).unwrap();
4029
4030		let just1 = (CONS1_ENGINE_ID, vec![4, 5]);
4031		backend.append_justification(block1, just1.clone()).unwrap();
4032
4033		let just2 = (CONS1_ENGINE_ID, vec![6, 7]);
4034		assert!(matches!(
4035			backend.append_justification(block1, just2),
4036			Err(ClientError::BadJustification(_))
4037		));
4038
4039		let justifications = {
4040			let mut just = Justifications::from(just0);
4041			just.append(just1);
4042			just
4043		};
4044		assert_eq!(backend.blockchain().justifications(block1).unwrap(), Some(justifications),);
4045	}
4046
4047	#[test]
4048	fn test_finalize_multiple_blocks_in_single_op() {
4049		let backend = Backend::<Block>::new_test(10, 10);
4050
4051		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4052		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4053		let block2 = insert_header(&backend, 2, block1, None, Default::default());
4054		let block3 = insert_header(&backend, 3, block2, None, Default::default());
4055		let block4 = insert_header(&backend, 4, block3, None, Default::default());
4056		{
4057			let mut op = backend.begin_operation().unwrap();
4058			backend.begin_state_operation(&mut op, block0).unwrap();
4059			op.mark_finalized(block1, None).unwrap();
4060			op.mark_finalized(block2, None).unwrap();
4061			backend.commit_operation(op).unwrap();
4062		}
4063		{
4064			let mut op = backend.begin_operation().unwrap();
4065			backend.begin_state_operation(&mut op, block2).unwrap();
4066			op.mark_finalized(block3, None).unwrap();
4067			op.mark_finalized(block4, None).unwrap();
4068			backend.commit_operation(op).unwrap();
4069		}
4070	}
4071
	#[test]
	fn storage_hash_is_cached_correctly() {
		// Regression check: after the same key is overwritten in a child
		// block, `storage_hash` queried through `state_at` must reflect each
		// block's own value — i.e. the cached hash must not leak between
		// states.
		let state_version = StateVersion::default();
		let backend = Backend::<Block>::new_test(10, 10);

		// Block #0: initialize storage with `test -> test`.
		let hash0 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, Default::default()).unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(b"test".to_vec(), b"test".to_vec())];

			// Compute the state root over the initial storage so the header
			// commits to it.
			header.state_root = op
				.old_state
				.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.reset_storage(
				Storage {
					top: storage.into_iter().collect(),
					children_default: Default::default(),
				},
				state_version,
			)
			.unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			hash
		};

		let block0_hash = backend
			.state_at(hash0, TrieCacheContext::Untrusted)
			.unwrap()
			.storage_hash(&b"test"[..])
			.unwrap();

		// Block #1: overwrite the same key with `test2`, imported as
		// `Normal` (not best) first.
		let hash1 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hash0).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash0,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))];

			let (root, overlay) = op.old_state.storage_root(
				storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
				state_version,
			);
			op.update_db_storage(overlay).unwrap();
			header.state_root = root.into();
			let hash = header.hash();

			op.update_storage(storage, Vec::new()).unwrap();
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Normal, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			hash
		};

		// Promote block #1 to best in a separate operation.
		{
			let header = backend.blockchain().header(hash1).unwrap().unwrap();
			let mut op = backend.begin_operation().unwrap();
			op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
			backend.commit_operation(op).unwrap();
		}

		let block1_hash = backend
			.state_at(hash1, TrieCacheContext::Untrusted)
			.unwrap()
			.storage_hash(&b"test"[..])
			.unwrap();

		// Different values under the same key must yield different hashes.
		assert_ne!(block0_hash, block1_hash);
	}
4164
4165	#[test]
4166	fn test_finalize_non_sequential() {
4167		let backend = Backend::<Block>::new_test(10, 10);
4168
4169		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4170		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4171		let block2 = insert_header(&backend, 2, block1, None, Default::default());
4172		{
4173			let mut op = backend.begin_operation().unwrap();
4174			backend.begin_state_operation(&mut op, block0).unwrap();
4175			op.mark_finalized(block2, None).unwrap();
4176			backend.commit_operation(op).unwrap_err();
4177		}
4178	}
4179
	#[test]
	fn prune_blocks_on_finalize() {
		// Body pruning on finalization: with `Some(2)` only the last two
		// finalized bodies survive; `KeepFinalized` and `KeepAll` retain all
		// five bodies.
		let pruning_modes =
			vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll];

		for pruning_mode in pruning_modes {
			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 0);
			// Build a straight chain of 5 blocks, one extrinsic each.
			let mut blocks = Vec::new();
			let mut prev_hash = Default::default();
			for i in 0..5 {
				let hash = insert_block(
					&backend,
					i,
					prev_hash,
					None,
					Default::default(),
					vec![UncheckedXt::new_transaction(i.into(), ())],
					None,
				)
				.unwrap();
				blocks.push(hash);
				prev_hash = hash;
			}

			// Finalize blocks #1..#4 in a single operation.
			{
				let mut op = backend.begin_operation().unwrap();
				backend.begin_state_operation(&mut op, blocks[4]).unwrap();
				for i in 1..5 {
					op.mark_finalized(blocks[i], None).unwrap();
				}
				backend.commit_operation(op).unwrap();
			}
			let bc = backend.blockchain();

			if matches!(pruning_mode, BlocksPruning::Some(_)) {
				// Keep-last-2: bodies of #0..#2 are gone, #3 and #4 remain.
				assert_eq!(None, bc.body(blocks[0]).unwrap());
				assert_eq!(None, bc.body(blocks[1]).unwrap());
				assert_eq!(None, bc.body(blocks[2]).unwrap());
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
					bc.body(blocks[3]).unwrap()
				);
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
					bc.body(blocks[4]).unwrap()
				);
			} else {
				// KeepFinalized / KeepAll: every body is still available.
				for i in 0..5 {
					assert_eq!(
						Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]),
						bc.body(blocks[i]).unwrap()
					);
				}
			}
		}
	}
4236
	#[test]
	fn prune_blocks_on_finalize_with_fork() {
		// Like `prune_blocks_on_finalize`, but with a stale fork branching off
		// block #1. The fork's bodies must be discarded on finalization unless
		// the mode is `KeepAll`; headers remain queryable by number either way.
		sp_tracing::try_init_simple();

		let pruning_modes =
			vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll];

		for pruning in pruning_modes {
			let backend = Backend::<Block>::new_test_with_tx_storage(pruning, 10);
			// Canonical chain: 5 blocks, one extrinsic each.
			let mut blocks = Vec::new();
			let mut prev_hash = Default::default();
			for i in 0..5 {
				let hash = insert_block(
					&backend,
					i,
					prev_hash,
					None,
					Default::default(),
					vec![UncheckedXt::new_transaction(i.into(), ())],
					None,
				)
				.unwrap();
				blocks.push(hash);
				prev_hash = hash;
			}

			// insert a fork at block 2
			let fork_hash_root = insert_block(
				&backend,
				2,
				blocks[1],
				None,
				H256::random(),
				vec![UncheckedXt::new_transaction(2.into(), ())],
				None,
			)
			.unwrap();
			insert_block(
				&backend,
				3,
				fork_hash_root,
				None,
				H256::random(),
				vec![
					UncheckedXt::new_transaction(3.into(), ()),
					UncheckedXt::new_transaction(11.into(), ()),
				],
				None,
			)
			.unwrap();
			// Make the canonical chain tip the head before finalizing.
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_head(blocks[4]).unwrap();
			backend.commit_operation(op).unwrap();

			let bc = backend.blockchain();
			// The fork body is still present before finalization.
			assert_eq!(
				Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
				bc.body(fork_hash_root).unwrap()
			);

			// Finalize #1..#4 one commit at a time.
			for i in 1..5 {
				let mut op = backend.begin_operation().unwrap();
				backend.begin_state_operation(&mut op, blocks[4]).unwrap();
				op.mark_finalized(blocks[i], None).unwrap();
				backend.commit_operation(op).unwrap();
			}

			if matches!(pruning, BlocksPruning::Some(_)) {
				// Keep-last-2: only the bodies of #3 and #4 survive.
				assert_eq!(None, bc.body(blocks[0]).unwrap());
				assert_eq!(None, bc.body(blocks[1]).unwrap());
				assert_eq!(None, bc.body(blocks[2]).unwrap());

				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
					bc.body(blocks[3]).unwrap()
				);
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
					bc.body(blocks[4]).unwrap()
				);
			} else {
				// KeepFinalized / KeepAll: all canonical bodies remain.
				for i in 0..5 {
					assert_eq!(
						Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]),
						bc.body(blocks[i]).unwrap()
					);
				}
			}

			// The stale fork body survives only under `KeepAll`.
			if matches!(pruning, BlocksPruning::KeepAll) {
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
					bc.body(fork_hash_root).unwrap()
				);
			} else {
				assert_eq!(None, bc.body(fork_hash_root).unwrap());
			}

			// Canonical hashes by number remain available in every mode.
			assert_eq!(bc.info().best_number, 4);
			for i in 0..5 {
				assert!(bc.hash(i).unwrap().is_some());
			}
		}
	}
4342
	#[test]
	fn prune_blocks_on_finalize_and_reorg() {
		// Finalizing the `a` chain after a reorg away from 1b must prune the
		// bodies of the abandoned forks (1b, 2b) while all `a` bodies survive
		// under the 10-block pruning window.
		// 	0 - 1b
		// 	\ - 1a - 2a - 3a
		// 	     \ - 2b

		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);

		// Helper: insert a block carrying a single marker extrinsic `val`.
		let make_block = |index, parent, val: u64| {
			insert_block(
				&backend,
				index,
				parent,
				None,
				H256::random(),
				vec![UncheckedXt::new_transaction(val.into(), ())],
				None,
			)
			.unwrap()
		};

		let block_0 = make_block(0, Default::default(), 0x00);
		let block_1a = make_block(1, block_0, 0x1a);
		let block_1b = make_block(1, block_0, 0x1b);
		let block_2a = make_block(2, block_1a, 0x2a);
		let block_2b = make_block(2, block_1a, 0x2b);
		let block_3a = make_block(3, block_2a, 0x3a);

		// Make sure 1b is head
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, block_0).unwrap();
		op.mark_head(block_1b).unwrap();
		backend.commit_operation(op).unwrap();

		// Finalize 3a
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, block_0).unwrap();
		op.mark_head(block_3a).unwrap();
		op.mark_finalized(block_1a, None).unwrap();
		op.mark_finalized(block_2a, None).unwrap();
		op.mark_finalized(block_3a, None).unwrap();
		backend.commit_operation(op).unwrap();

		let bc = backend.blockchain();
		// Abandoned fork bodies are gone ...
		assert_eq!(None, bc.body(block_1b).unwrap());
		assert_eq!(None, bc.body(block_2b).unwrap());
		// ... while all canonical `a` chain bodies are retained.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x00.into(), ())]),
			bc.body(block_0).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x1a.into(), ())]),
			bc.body(block_1a).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x2a.into(), ())]),
			bc.body(block_2a).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x3a.into(), ())]),
			bc.body(block_3a).unwrap()
		);
	}
4406
	#[test]
	fn indexed_data_block_body() {
		// Transaction-indexed data: insert two extrinsics with index entries,
		// verify lookup by content hash, then check that pruning the block
		// also clears the index.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);

		let x0 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x1 = UncheckedXt::new_transaction(1.into(), ()).encode();
		// The first encoded byte is excluded: the hash/size cover only the
		// payload at offset 1.
		let x0_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x0[1..]);
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
		let index = vec![
			IndexOperation::Insert {
				extrinsic: 0,
				hash: x0_hash.as_ref().to_vec(),
				size: (x0.len() - 1) as u32,
			},
			IndexOperation::Insert {
				extrinsic: 1,
				hash: x1_hash.as_ref().to_vec(),
				size: (x1.len() - 1) as u32,
			},
		];
		let hash = insert_block(
			&backend,
			0,
			Default::default(),
			None,
			Default::default(),
			vec![
				UncheckedXt::new_transaction(0.into(), ()),
				UncheckedXt::new_transaction(1.into(), ()),
			],
			Some(index),
		)
		.unwrap();
		let bc = backend.blockchain();
		// Both payloads are retrievable by their content hash.
		assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[1..]);
		assert_eq!(bc.indexed_transaction(x1_hash).unwrap().unwrap(), &x1[1..]);

		let hashof0 = bc.info().genesis_hash;
		// Push one more blocks and make sure block is pruned and transaction index is cleared.
		let block1 =
			insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap();
		backend.finalize_block(block1, None).unwrap();
		assert_eq!(bc.body(hashof0).unwrap(), None);
		assert_eq!(bc.indexed_transaction(x0_hash).unwrap(), None);
		assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None);
	}
4453
	#[test]
	fn index_invalid_size() {
		// Index entries whose `size` exceeds the extrinsic length must be
		// rejected: x0 (size == full length) is indexed, x1 (size == length
		// + 1) is not.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);

		let x0 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x1 = UncheckedXt::new_transaction(1.into(), ()).encode();

		// Unlike `indexed_data_block_body`, the hashes here cover the full
		// encoded bytes (no 1-byte offset).
		let x0_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x0[..]);
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[..]);
		let index = vec![
			IndexOperation::Insert {
				extrinsic: 0,
				hash: x0_hash.as_ref().to_vec(),
				size: (x0.len()) as u32,
			},
			IndexOperation::Insert {
				extrinsic: 1,
				hash: x1_hash.as_ref().to_vec(),
				// Deliberately one byte too large.
				size: (x1.len() + 1) as u32,
			},
		];
		insert_block(
			&backend,
			0,
			Default::default(),
			None,
			Default::default(),
			vec![
				UncheckedXt::new_transaction(0.into(), ()),
				UncheckedXt::new_transaction(1.into(), ()),
			],
			Some(index),
		)
		.unwrap();
		let bc = backend.blockchain();
		assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[..]);
		assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None);
	}
4492
	#[test]
	fn renew_transaction_storage() {
		// Indexed data inserted at block #0 is kept alive by `Renew` index
		// operations in later blocks; once renewals stop the data is pruned
		// together with its last renewing block.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();
		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
		// Hash covers the payload only (first encoded byte skipped).
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
		for i in 0..10 {
			let mut index = Vec::new();
			if i == 0 {
				index.push(IndexOperation::Insert {
					extrinsic: 0,
					hash: x1_hash.as_ref().to_vec(),
					size: (x1.len() - 1) as u32,
				});
			} else if i < 5 {
				// keep renewing 1st
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
			} // else stop renewing
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				Some(index),
			)
			.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		// Finalize one block at a time. The last renewal happened in block
		// #4; with a 2-block pruning window the data survives finalization
		// steps #1..=#5 and is gone from step #6 onwards.
		for i in 1..10 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
			let bc = backend.blockchain();
			if i < 6 {
				assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());
			} else {
				assert!(bc.indexed_transaction(x1_hash).unwrap().is_none());
			}
		}
	}
4539
	#[test]
	fn multi_renew_transaction_storage() {
		// Test that multiple renewals within a single extrinsic work correctly
		// and that data survives across the renewal window.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Two distinct data items (hashed over the payload, skipping the
		// first encoded byte).
		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x2 = UncheckedXt::new_transaction(1.into(), ()).encode();
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
		let x2_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x2[1..]);

		for i in 0..10 {
			let mut index = Vec::new();
			if i == 0 {
				// Block 0: Insert both items as separate extrinsics
				index.push(IndexOperation::Insert {
					extrinsic: 0,
					hash: x1_hash.as_ref().to_vec(),
					size: (x1.len() - 1) as u32,
				});
				index.push(IndexOperation::Insert {
					extrinsic: 1,
					hash: x2_hash.as_ref().to_vec(),
					size: (x2.len() - 1) as u32,
				});
			} else if i < 5 {
				// Blocks 1-4: Renew BOTH items in a single extrinsic (multi-renew)
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x2_hash.as_ref().to_vec() });
			}
			// Blocks 5+: stop renewing

			let body = if i == 0 {
				vec![
					UncheckedXt::new_transaction(0.into(), ()),
					UncheckedXt::new_transaction(1.into(), ()),
				]
			} else {
				vec![UncheckedXt::new_transaction(i.into(), ())]
			};
			let hash =
				insert_block(&backend, i, prev_hash, None, Default::default(), body, Some(index))
					.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		// Finalize progressively and check that both items survive while renewed.
		// Last renewal is in block #4; with a 2-block window both items must
		// exist through step #5 and be pruned from step #6 on.
		for i in 1..10 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
			let bc = backend.blockchain();
			if i < 6 {
				assert!(
					bc.indexed_transaction(x1_hash).unwrap().is_some(),
					"x1 should exist at finalization step {i}"
				);
				assert!(
					bc.indexed_transaction(x2_hash).unwrap().is_some(),
					"x2 should exist at finalization step {i}"
				);
			} else {
				assert!(
					bc.indexed_transaction(x1_hash).unwrap().is_none(),
					"x1 should be pruned at finalization step {i}"
				);
				assert!(
					bc.indexed_transaction(x2_hash).unwrap().is_none(),
					"x2 should be pruned at finalization step {i}"
				);
			}
		}
	}
4618
	#[test]
	fn multi_renew_block_indexed_body() {
		// Test that block_indexed_body returns data for all hashes in a MultiRenew extrinsic.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);

		// Hashes cover the payload only (first encoded byte skipped).
		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x2 = UncheckedXt::new_transaction(1.into(), ()).encode();
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
		let x2_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x2[1..]);

		// Block 0: Insert both items
		let block0 = insert_block(
			&backend,
			0,
			Default::default(),
			None,
			Default::default(),
			vec![
				UncheckedXt::new_transaction(0.into(), ()),
				UncheckedXt::new_transaction(1.into(), ()),
			],
			Some(vec![
				IndexOperation::Insert {
					extrinsic: 0,
					hash: x1_hash.as_ref().to_vec(),
					size: (x1.len() - 1) as u32,
				},
				IndexOperation::Insert {
					extrinsic: 1,
					hash: x2_hash.as_ref().to_vec(),
					size: (x2.len() - 1) as u32,
				},
			]),
		)
		.unwrap();

		// Block 1: Multi-renew both in a single extrinsic
		let block1 = insert_block(
			&backend,
			1,
			block0,
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(10.into(), ())],
			Some(vec![
				IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() },
				IndexOperation::Renew { extrinsic: 0, hash: x2_hash.as_ref().to_vec() },
			]),
		)
		.unwrap();

		// Both payloads come back, in the order they were renewed.
		let bc = backend.blockchain();
		let indexed_body = bc.block_indexed_body(block1).unwrap().unwrap();
		assert_eq!(indexed_body.len(), 2, "Should have 2 indexed data blobs");
		assert_eq!(&indexed_body[0][..], &x1[1..]);
		assert_eq!(&indexed_body[1][..], &x2[1..]);
	}
4676
	#[test]
	fn multi_renew_prune_releases_all() {
		// Test that pruning a block with MultiRenew correctly releases all ref counts.
		// Use BlocksPruning::Some(2) and build enough blocks so both the insert block
		// and the multi-renew block get pruned.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Two indexed payloads; the index hash covers the encoding minus its
		// first byte, mirroring the `size: len - 1` in the Insert ops below.
		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x2 = UncheckedXt::new_transaction(1.into(), ()).encode();
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
		let x2_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x2[1..]);

		// Chain layout: block 0 inserts both blobs, block 1 multi-renews both from a
		// single extrinsic, blocks 2..5 are filler so pruning can reach blocks 0 and 1.
		for i in 0..6 {
			let mut index = Vec::new();
			let body = if i == 0 {
				// Block 0: Insert both items
				index.push(IndexOperation::Insert {
					extrinsic: 0,
					hash: x1_hash.as_ref().to_vec(),
					size: (x1.len() - 1) as u32,
				});
				index.push(IndexOperation::Insert {
					extrinsic: 1,
					hash: x2_hash.as_ref().to_vec(),
					size: (x2.len() - 1) as u32,
				});
				vec![
					UncheckedXt::new_transaction(0.into(), ()),
					UncheckedXt::new_transaction(1.into(), ()),
				]
			} else if i == 1 {
				// Block 1: Multi-renew both in one extrinsic
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x2_hash.as_ref().to_vec() });
				vec![UncheckedXt::new_transaction(10.into(), ())]
			} else {
				// Blocks 2+: empty, just advancing
				vec![UncheckedXt::new_transaction(i.into(), ())]
			};
			let hash =
				insert_block(&backend, i, prev_hash, None, Default::default(), body, Some(index))
					.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		let bc = backend.blockchain();
		// Before finalization, data exists
		assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());
		assert!(bc.indexed_transaction(x2_hash).unwrap().is_some());

		// Finalize progressively
		for i in 1..6 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
		}

		// After finalizing block 5 with pruning=2, blocks 0-3 are pruned.
		// Both insert (block 0) and multi-renew (block 1) refs are released.
		assert!(
			bc.indexed_transaction(x1_hash).unwrap().is_none(),
			"x1 should be gone after all referring blocks are pruned"
		);
		assert!(
			bc.indexed_transaction(x2_hash).unwrap().is_none(),
			"x2 should be gone after all referring blocks are pruned"
		);
	}
4749
4750	#[test]
4751	fn multi_renew_body_reconstruction() {
4752		// Test that body_uncached can reconstruct extrinsics from MultiRenew blocks.
4753		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);
4754
4755		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
4756		let x2 = UncheckedXt::new_transaction(1.into(), ()).encode();
4757		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
4758		let x2_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x2[1..]);
4759
4760		// Block 0: Insert both
4761		let block0 = insert_block(
4762			&backend,
4763			0,
4764			Default::default(),
4765			None,
4766			Default::default(),
4767			vec![
4768				UncheckedXt::new_transaction(0.into(), ()),
4769				UncheckedXt::new_transaction(1.into(), ()),
4770			],
4771			Some(vec![
4772				IndexOperation::Insert {
4773					extrinsic: 0,
4774					hash: x1_hash.as_ref().to_vec(),
4775					size: (x1.len() - 1) as u32,
4776				},
4777				IndexOperation::Insert {
4778					extrinsic: 1,
4779					hash: x2_hash.as_ref().to_vec(),
4780					size: (x2.len() - 1) as u32,
4781				},
4782			]),
4783		)
4784		.unwrap();
4785
4786		// Block 1: Multi-renew both in one extrinsic
4787		let renew_xt = UncheckedXt::new_transaction(10.into(), ());
4788		let block1 = insert_block(
4789			&backend,
4790			1,
4791			block0,
4792			None,
4793			Default::default(),
4794			vec![renew_xt.clone()],
4795			Some(vec![
4796				IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() },
4797				IndexOperation::Renew { extrinsic: 0, hash: x2_hash.as_ref().to_vec() },
4798			]),
4799		)
4800		.unwrap();
4801
4802		// Reconstruct body from block 1
4803		let bc = backend.blockchain();
4804		let body = bc.body(block1).unwrap().unwrap();
4805		assert_eq!(body.len(), 1, "Block 1 has one extrinsic");
4806		assert_eq!(body[0], renew_xt, "Extrinsic should be reconstructed correctly");
4807	}
4808
4809	#[test]
4810	fn single_renew_backwards_compatible() {
4811		// Verify that a single renewal per extrinsic still uses DbExtrinsic::Indexed,
4812		// preserving backwards compatibility.
4813		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
4814		let mut prev_hash = Default::default();
4815
4816		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
4817		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
4818
4819		// Block 0: Insert
4820		let block0 = insert_block(
4821			&backend,
4822			0,
4823			prev_hash,
4824			None,
4825			Default::default(),
4826			vec![UncheckedXt::new_transaction(0.into(), ())],
4827			Some(vec![IndexOperation::Insert {
4828				extrinsic: 0,
4829				hash: x1_hash.as_ref().to_vec(),
4830				size: (x1.len() - 1) as u32,
4831			}]),
4832		)
4833		.unwrap();
4834		prev_hash = block0;
4835
4836		// Block 1: Single renew (should produce Indexed, not MultiRenew)
4837		let block1 = insert_block(
4838			&backend,
4839			1,
4840			prev_hash,
4841			None,
4842			Default::default(),
4843			vec![UncheckedXt::new_transaction(1.into(), ())],
4844			Some(vec![IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() }]),
4845		)
4846		.unwrap();
4847
4848		// Verify data is accessible
4849		let bc = backend.blockchain();
4850		assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());
4851
4852		// Verify body can be reconstructed (confirms Indexed variant works)
4853		let body = bc.body(block1).unwrap().unwrap();
4854		assert_eq!(body.len(), 1);
4855		assert_eq!(body[0], UncheckedXt::new_transaction(1.into(), ()));
4856
4857		// Verify block_indexed_body returns the data
4858		let indexed = bc.block_indexed_body(block1).unwrap().unwrap();
4859		assert_eq!(indexed.len(), 1);
4860		assert_eq!(&indexed[0][..], &x1[1..]);
4861	}
4862
	#[test]
	fn multi_renew_duplicate_hash_balanced_lifecycle() {
		// Renewing the SAME hash twice in one block must be balanced: both renewals
		// are recorded, and pruning the block releases both, so the blob is
		// eventually deleted (no leaked reference keeping it alive).
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Indexed payload; hash covers the encoding minus its first byte.
		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);

		for i in 0..6 {
			let mut index = Vec::new();
			let body = if i == 0 {
				// Block 0: insert the blob.
				index.push(IndexOperation::Insert {
					extrinsic: 0,
					hash: x1_hash.as_ref().to_vec(),
					size: (x1.len() - 1) as u32,
				});
				vec![UncheckedXt::new_transaction(0.into(), ())]
			} else if i == 1 {
				// Block 1: renew the same hash twice from the same extrinsic.
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
				vec![UncheckedXt::new_transaction(10.into(), ())]
			} else {
				// Blocks 2..5: filler blocks so pruning can reach blocks 0 and 1.
				vec![UncheckedXt::new_transaction(i.into(), ())]
			};
			let hash =
				insert_block(&backend, i, prev_hash, None, Default::default(), body, Some(index))
					.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		let bc = backend.blockchain();
		// While the referring blocks are unpruned, the blob is retrievable.
		assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());

		// Finalize progressively; with BlocksPruning::Some(2) this prunes blocks 0
		// and 1, releasing the insert reference and both renewal references.
		for i in 1..6 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
		}

		assert!(bc.indexed_transaction(x1_hash).unwrap().is_none());
	}
4907
	#[test]
	fn multi_renew_mixed_duplicates_and_uniques() {
		// Ops [W, X, Y, W, Z]: insertion-order preserved, duplicate W kept.
		// Also checks that pruning releases every reference, duplicates included.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Four distinct indexed payloads; hashes cover the encoding minus the
		// first byte, matching the `size: len - 1` in the Insert ops below.
		let w = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x = UncheckedXt::new_transaction(1.into(), ()).encode();
		let y = UncheckedXt::new_transaction(2.into(), ()).encode();
		let z = UncheckedXt::new_transaction(3.into(), ()).encode();
		let w_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&w[1..]);
		let x_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x[1..]);
		let y_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&y[1..]);
		let z_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&z[1..]);

		for i in 0..6 {
			let mut index = Vec::new();
			let body = if i == 0 {
				// Block 0: insert all four payloads, one per extrinsic.
				index.push(IndexOperation::Insert {
					extrinsic: 0,
					hash: w_hash.as_ref().to_vec(),
					size: (w.len() - 1) as u32,
				});
				index.push(IndexOperation::Insert {
					extrinsic: 1,
					hash: x_hash.as_ref().to_vec(),
					size: (x.len() - 1) as u32,
				});
				index.push(IndexOperation::Insert {
					extrinsic: 2,
					hash: y_hash.as_ref().to_vec(),
					size: (y.len() - 1) as u32,
				});
				index.push(IndexOperation::Insert {
					extrinsic: 3,
					hash: z_hash.as_ref().to_vec(),
					size: (z.len() - 1) as u32,
				});
				vec![
					UncheckedXt::new_transaction(0.into(), ()),
					UncheckedXt::new_transaction(1.into(), ()),
					UncheckedXt::new_transaction(2.into(), ()),
					UncheckedXt::new_transaction(3.into(), ()),
				]
			} else if i == 1 {
				// 5 ops: W appears twice (positions 0 and 3), X/Y/Z once each.
				index.push(IndexOperation::Renew { extrinsic: 0, hash: w_hash.as_ref().to_vec() });
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x_hash.as_ref().to_vec() });
				index.push(IndexOperation::Renew { extrinsic: 0, hash: y_hash.as_ref().to_vec() });
				index.push(IndexOperation::Renew { extrinsic: 0, hash: w_hash.as_ref().to_vec() });
				index.push(IndexOperation::Renew { extrinsic: 0, hash: z_hash.as_ref().to_vec() });
				vec![UncheckedXt::new_transaction(10.into(), ())]
			} else {
				// Blocks 2..5: filler blocks so pruning can reach blocks 0 and 1.
				vec![UncheckedXt::new_transaction(i.into(), ())]
			};
			let hash =
				insert_block(&backend, i, prev_hash, None, Default::default(), body, Some(index))
					.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		let bc = backend.blockchain();

		// The indexed body of block 1 must list all 5 blobs in submission order,
		// with the duplicate W present at both positions 0 and 3.
		let indexed_body = bc.block_indexed_body(blocks[1]).unwrap().unwrap();
		assert_eq!(indexed_body.len(), 5);
		assert_eq!(&indexed_body[0][..], &w[1..]);
		assert_eq!(&indexed_body[1][..], &x[1..]);
		assert_eq!(&indexed_body[2][..], &y[1..]);
		assert_eq!(&indexed_body[3][..], &w[1..]);
		assert_eq!(&indexed_body[4][..], &z[1..]);

		// Finalize progressively; with BlocksPruning::Some(2) this prunes blocks 0
		// and 1 and must release every reference, the duplicated W's included.
		for i in 1..6 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
		}

		assert!(bc.indexed_transaction(w_hash).unwrap().is_none(), "W deleted");
		assert!(bc.indexed_transaction(x_hash).unwrap().is_none(), "X deleted");
		assert!(bc.indexed_transaction(y_hash).unwrap().is_none(), "Y deleted");
		assert!(bc.indexed_transaction(z_hash).unwrap().is_none(), "Z deleted");
	}
4993
	#[test]
	fn block_indexed_body_preserves_renew_op_submission_order() {
		// `block_indexed_body(N)` returns blobs in submission order of the underlying
		// Renew ops. Sorting (e.g. via BTreeSet) would desync off-chain proof
		// construction from on-chain verification.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::KeepAll, 10);

		// Five distinct indexed payloads; the index hashes cover each encoding
		// minus its first byte.
		let payloads: Vec<Vec<u8>> = (0..5)
			.map(|i: u64| UncheckedXt::new_transaction(i.into(), ()).encode())
			.collect();
		let hashes: Vec<<HashingFor<Block> as sp_core::Hasher>::Out> = payloads
			.iter()
			.map(|p| <HashingFor<Block> as sp_core::Hasher>::hash(&p[1..]))
			.collect();

		// Block 0: insert all five payloads, one per extrinsic.
		let mut prev_hash = Default::default();
		let insert_ops: Vec<IndexOperation> = (0..5)
			.map(|i| IndexOperation::Insert {
				extrinsic: i as u32,
				hash: hashes[i].as_ref().to_vec(),
				size: (payloads[i].len() - 1) as u32,
			})
			.collect();
		let body0: Vec<UncheckedXt> =
			(0..5).map(|i| UncheckedXt::new_transaction((i as u64).into(), ())).collect();
		prev_hash =
			insert_block(&backend, 0, prev_hash, None, Default::default(), body0, Some(insert_ops))
				.unwrap();

		// Non-monotonic submission order so any sort would visibly disturb it.
		let submission_order = [4usize, 1, 0, 3, 2];
		let renew_ops: Vec<IndexOperation> = submission_order
			.iter()
			.map(|&i| IndexOperation::Renew { extrinsic: 0, hash: hashes[i].as_ref().to_vec() })
			.collect();
		let block1 = insert_block(
			&backend,
			1,
			prev_hash,
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(100.into(), ())],
			Some(renew_ops),
		)
		.unwrap();

		// Read block 1's raw body-index entry straight from the database (resolved
		// through the KEY_LOOKUP column) and check the stored hash order directly.
		let bc = backend.blockchain();
		let body_index_bytes = read_db(
			&*backend.storage.db,
			columns::KEY_LOOKUP,
			columns::BODY_INDEX,
			BlockId::<Block>::Hash(block1),
		)
		.unwrap()
		.expect("block 1 must have a BODY_INDEX entry");
		let decoded: Vec<DbExtrinsic<Block>> =
			Decode::decode(&mut &body_index_bytes[..]).expect("must decode");
		assert_eq!(decoded.len(), 1);
		match &decoded[0] {
			// All five renewals collapse into one MultiRenew entry whose hash list
			// must match the submission order exactly.
			DbExtrinsic::MultiRenew { hashes: stored_hashes, .. } => {
				assert_eq!(stored_hashes.len(), 5);
				for (i, &order_idx) in submission_order.iter().enumerate() {
					assert_eq!(stored_hashes[i].as_ref(), hashes[order_idx].as_ref());
				}
			},
			other => panic!("expected MultiRenew; got {other:?}"),
		}

		// The public API must expose the blobs in the same submission order.
		let blobs = bc.block_indexed_body(block1).unwrap().unwrap();
		assert_eq!(blobs.len(), 5);
		for (i, &order_idx) in submission_order.iter().enumerate() {
			assert_eq!(blobs[i].as_slice(), &payloads[order_idx][1..]);
		}
	}
5068
	#[test]
	fn insert_and_renew_same_index_renew_wins() {
		// Documents the pre-existing precedence in apply_index_ops: when both an Insert
		// and a Renew op target the same extrinsic_index, the Renew wins and the Insert
		// is silently discarded — the Insert's data write to the TRANSACTION column
		// never happens.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);

		// Two payloads: X is inserted normally, Y is the one that will be dropped.
		let x = UncheckedXt::new_transaction(0.into(), ()).encode();
		let y = UncheckedXt::new_transaction(1.into(), ()).encode();
		let x_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x[1..]);
		let y_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&y[1..]);

		// Block 0: Insert X normally — X is now stored.
		let block0 = insert_block(
			&backend,
			0,
			Default::default(),
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(0.into(), ())],
			Some(vec![IndexOperation::Insert {
				extrinsic: 0,
				hash: x_hash.as_ref().to_vec(),
				size: (x.len() - 1) as u32,
			}]),
		)
		.unwrap();

		// Block 1: ops contain BOTH Insert{0, Y, ...} and Renew{0, X} for the same extrinsic.
		// Per apply_index_ops precedence, Renew wins and Insert{Y} is silently dropped.
		let block1 = insert_block(
			&backend,
			1,
			block0,
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(99.into(), ())],
			Some(vec![
				IndexOperation::Insert {
					extrinsic: 0,
					hash: y_hash.as_ref().to_vec(),
					size: (y.len() - 1) as u32,
				},
				IndexOperation::Renew { extrinsic: 0, hash: x_hash.as_ref().to_vec() },
			]),
		)
		.unwrap();

		let bc = backend.blockchain();

		// X (renewed) is retrievable; Y (the displaced Insert) was never written.
		assert!(bc.indexed_transaction(x_hash).unwrap().is_some());
		assert!(
			bc.indexed_transaction(y_hash).unwrap().is_none(),
			"Insert at the same extrinsic index as a Renew is silently dropped",
		);

		// Block 1's indexed body contains only the renewed X blob.
		let indexed = bc.block_indexed_body(block1).unwrap().unwrap();
		assert_eq!(indexed.len(), 1);
		assert_eq!(&indexed[0][..], &x[1..]);
	}
5130
5131	#[test]
5132	fn db_extrinsic_encoding_round_trip() {
5133		let entries: Vec<DbExtrinsic<Block>> = vec![
5134			DbExtrinsic::Indexed { hash: H256::repeat_byte(0xAA), header: vec![0x01, 0x02, 0x03] },
5135			DbExtrinsic::Full(UncheckedXt::new_transaction(42.into(), ())),
5136			DbExtrinsic::MultiRenew {
5137				hashes: vec![H256::repeat_byte(0xBB), H256::repeat_byte(0xCC)],
5138				extrinsic: vec![0x04, 0x05, 0x06, 0x07],
5139			},
5140		];
5141
5142		let encoded = entries.encode();
5143		let decoded: Vec<DbExtrinsic<Block>> =
5144			Decode::decode(&mut &encoded[..]).expect("encoded DbExtrinsic vec must decode");
5145		assert_eq!(encoded, decoded.encode());
5146	}
5147
	#[test]
	fn apply_index_ops_deterministic() {
		// Running apply_index_ops twice over identical inputs must produce
		// byte-identical BODY_INDEX encodings — guards against nondeterministic
		// grouping/iteration order. Also pins the expected shape: extrinsic 0's
		// three renewals (h1, h2, h1 again) collapse into one MultiRenew preserving
		// submission order, while extrinsic 1's single renewal stays Indexed.
		let body = vec![
			UncheckedXt::new_transaction(0.into(), ()),
			UncheckedXt::new_transaction(1.into(), ()),
		];
		let h1 = H256::repeat_byte(0x11).as_ref().to_vec();
		let h2 = H256::repeat_byte(0x22).as_ref().to_vec();
		let h3 = H256::repeat_byte(0x33).as_ref().to_vec();

		// Duplicate h1 for extrinsic 0 is intentional: order and multiplicity must
		// both be preserved in the output.
		let ops = vec![
			IndexOperation::Renew { extrinsic: 0, hash: h1.clone() },
			IndexOperation::Renew { extrinsic: 0, hash: h2.clone() },
			IndexOperation::Renew { extrinsic: 0, hash: h1.clone() },
			IndexOperation::Renew { extrinsic: 1, hash: h3.clone() },
		];

		let mut tx1: Transaction<DbHash> = Transaction::new();
		let bytes1 = apply_index_ops::<Block>(&mut tx1, body.clone(), ops.clone());

		let mut tx2: Transaction<DbHash> = Transaction::new();
		let bytes2 = apply_index_ops::<Block>(&mut tx2, body, ops);

		// Same inputs → same bytes.
		assert_eq!(bytes1, bytes2);

		let decoded: Vec<DbExtrinsic<Block>> =
			Decode::decode(&mut &bytes1[..]).expect("apply_index_ops output must decode");
		assert_eq!(decoded.len(), 2);
		match &decoded[0] {
			DbExtrinsic::MultiRenew { hashes, .. } => {
				assert_eq!(hashes.len(), 3);
				assert_eq!(hashes[0].as_ref(), h1.as_slice());
				assert_eq!(hashes[1].as_ref(), h2.as_slice());
				assert_eq!(hashes[2].as_ref(), h1.as_slice());
			},
			other => panic!("expected MultiRenew, got {other:?}"),
		}
		assert!(matches!(decoded[1], DbExtrinsic::Indexed { .. }));
	}
5187
	#[test]
	fn multi_renew_in_one_block_indexed_in_another() {
		// X across three blocks: Insert, single Renew, duplicate Renew. Refcount peaks at 4.
		// Pruning all three blocks must release every reference so X is deleted.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Indexed payload; hash covers the encoding minus its first byte.
		let x = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x[1..]);

		for i in 0..6 {
			let mut index = Vec::new();
			let body = if i == 0 {
				// Block 0: insert X.
				index.push(IndexOperation::Insert {
					extrinsic: 0,
					hash: x_hash.as_ref().to_vec(),
					size: (x.len() - 1) as u32,
				});
				vec![UncheckedXt::new_transaction(0.into(), ())]
			} else if i == 1 {
				// Block 1: a single renewal (stored as the Indexed variant).
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x_hash.as_ref().to_vec() });
				vec![UncheckedXt::new_transaction(10.into(), ())]
			} else if i == 2 {
				// Block 2: two renewals from one extrinsic (stored as MultiRenew).
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x_hash.as_ref().to_vec() });
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x_hash.as_ref().to_vec() });
				vec![UncheckedXt::new_transaction(20.into(), ())]
			} else {
				// Blocks 3..5: filler blocks so pruning can reach blocks 0-2.
				vec![UncheckedXt::new_transaction(i.into(), ())]
			};
			let hash =
				insert_block(&backend, i, prev_hash, None, Default::default(), body, Some(index))
					.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		let bc = backend.blockchain();
		// While the referring blocks are unpruned, X is retrievable.
		assert!(bc.indexed_transaction(x_hash).unwrap().is_some());

		// Finalize progressively; with BlocksPruning::Some(2) all three referring
		// blocks get pruned and all four references must be released.
		for i in 1..6 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
		}

		assert!(bc.indexed_transaction(x_hash).unwrap().is_none());
	}
5236
	#[test]
	fn remove_leaf_block_works() {
		// Exercises Backend::remove_leaf_block: the best block cannot be removed,
		// and removing other leaves updates the leaf set and children indices.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();
		// Base chain: blocks 0 and 1.
		for i in 0..2 {
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		// Two sibling forks at number 2, both children of block 1. The random
		// extra key makes the two headers (and hashes) distinct.
		for i in 0..2 {
			let hash = insert_block(
				&backend,
				2,
				blocks[1],
				None,
				sp_core::H256::random(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);
		}

		// insert a fork at block 1, which becomes best block
		let best_hash = insert_block(
			&backend,
			1,
			blocks[0],
			None,
			sp_core::H256::random(),
			vec![UncheckedXt::new_transaction(42.into(), ())],
			None,
		)
		.unwrap();

		// The best block is protected from removal.
		assert_eq!(backend.blockchain().info().best_hash, best_hash);
		assert!(backend.remove_leaf_block(best_hash).is_err());

		assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]);
		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]);

		// Remove leaf blocks[3]: state, header, leaf entry and child link all go.
		assert!(backend.have_state_at(blocks[3], 2));
		assert!(backend.blockchain().header(blocks[3]).unwrap().is_some());
		backend.remove_leaf_block(blocks[3]).unwrap();
		assert!(!backend.have_state_at(blocks[3], 2));
		assert!(backend.blockchain().header(blocks[3]).unwrap().is_none());
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]);
		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]);

		// Remove leaf blocks[2]: block 1 now becomes a leaf again.
		assert!(backend.have_state_at(blocks[2], 2));
		assert!(backend.blockchain().header(blocks[2]).unwrap().is_some());
		backend.remove_leaf_block(blocks[2]).unwrap();
		assert!(!backend.have_state_at(blocks[2], 2));
		assert!(backend.blockchain().header(blocks[2]).unwrap().is_none());
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]);
		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]);

		// Remove leaf blocks[1]: only the best-block fork remains.
		assert!(backend.have_state_at(blocks[1], 1));
		assert!(backend.blockchain().header(blocks[1]).unwrap().is_some());
		backend.remove_leaf_block(blocks[1]).unwrap();
		assert!(!backend.have_state_at(blocks[1], 1));
		assert!(backend.blockchain().header(blocks[1]).unwrap().is_none());
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]);
		assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]);
	}
5313
	#[test]
	fn test_import_existing_block_as_new_head() {
		// Re-importing an old block as the new best head must be rejected when the
		// block is older than best - canonicalization_delay, and accepted otherwise.
		let backend: Backend<Block> = Backend::new_test(10, 3);
		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
		let block1 = insert_header(&backend, 1, block0, None, Default::default());
		let block2 = insert_header(&backend, 2, block1, None, Default::default());
		let block3 = insert_header(&backend, 3, block2, None, Default::default());
		let block4 = insert_header(&backend, 4, block3, None, Default::default());
		let block5 = insert_header(&backend, 5, block4, None, Default::default());
		assert_eq!(backend.blockchain().info().best_hash, block5);

		// Insert 1 as best again. This should fail because canonicalization_delay == 3 and best ==
		// 5
		let header = Header {
			number: 1,
			parent_hash: block0,
			state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
			digest: Default::default(),
			extrinsics_root: Default::default(),
		};
		let mut op = backend.begin_operation().unwrap();
		op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
		assert!(matches!(backend.commit_operation(op), Err(sp_blockchain::Error::SetHeadTooOld)));

		// Insert 2 as best again.
		// Block 2 is within the delay window (5 - 3), so this must succeed.
		let header = backend.blockchain().header(block2).unwrap().unwrap();
		let mut op = backend.begin_operation().unwrap();
		op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
		backend.commit_operation(op).unwrap();
		assert_eq!(backend.blockchain().info().best_hash, block2);
	}
5345
5346	#[test]
5347	fn test_import_existing_block_as_final() {
5348		let backend: Backend<Block> = Backend::new_test(10, 10);
5349		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
5350		let block1 = insert_header(&backend, 1, block0, None, Default::default());
5351		let _block2 = insert_header(&backend, 2, block1, None, Default::default());
5352		// Genesis is auto finalized, the rest are not.
5353		assert_eq!(backend.blockchain().info().finalized_hash, block0);
5354
5355		// Insert 1 as final again.
5356		let header = backend.blockchain().header(block1).unwrap().unwrap();
5357
5358		let mut op = backend.begin_operation().unwrap();
5359		op.set_block_data(header, None, None, None, NewBlockState::Final, true).unwrap();
5360		backend.commit_operation(op).unwrap();
5361
5362		assert_eq!(backend.blockchain().info().finalized_hash, block1);
5363	}
5364
5365	#[test]
5366	fn test_import_existing_state_fails() {
5367		let backend: Backend<Block> = Backend::new_test(10, 10);
5368		let genesis =
5369			insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
5370				.unwrap();
5371
5372		insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();
5373		let err = insert_block(&backend, 1, genesis, None, Default::default(), vec![], None)
5374			.err()
5375			.unwrap();
5376		match err {
5377			sp_blockchain::Error::StateDatabase(m) if m == "Block already exists" => (),
5378			e @ _ => panic!("Unexpected error {:?}", e),
5379		}
5380	}
5381
	#[test]
	fn test_leaves_not_created_for_ancient_blocks() {
		// Importing a fork below the last finalized block must not add a new leaf.
		let backend: Backend<Block> = Backend::new_test(10, 10);
		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());

		let block1_a = insert_header(&backend, 1, block0, None, Default::default());
		let block2_a = insert_header(&backend, 2, block1_a, None, Default::default());
		backend.finalize_block(block1_a, None).unwrap();
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);

		// Insert a fork prior to finalization point. A leaf should not be created.
		insert_header_no_head(&backend, 1, block0, [1; 32].into());
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
	}
5396
	#[test]
	fn revert_non_best_blocks() {
		// Reverting must remove non-best forks too: after reverting past a fork
		// point, both the canonical descendants and the fork are fully pruned
		// (status, BODY and HEADER columns) and the leaf set collapses.
		let backend = Backend::<Block>::new_test(10, 10);

		let genesis =
			insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
				.unwrap();

		let block1 =
			insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();

		let block2 =
			insert_block(&backend, 2, block1, None, Default::default(), vec![], None).unwrap();

		// Block 3 is imported as Normal (not best) on top of block 2.
		let block3 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, block1).unwrap();
			let header = Header {
				number: 3,
				parent_hash: block2,
				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			op.set_block_data(
				header.clone(),
				Some(Vec::new()),
				None,
				None,
				NewBlockState::Normal,
				true,
			)
			.unwrap();

			backend.commit_operation(op).unwrap();

			header.hash()
		};

		// Block 4 extends block 3, also as a non-best block.
		let block4 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, block2).unwrap();
			let header = Header {
				number: 4,
				parent_hash: block3,
				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			op.set_block_data(
				header.clone(),
				Some(Vec::new()),
				None,
				None,
				NewBlockState::Normal,
				true,
			)
			.unwrap();

			backend.commit_operation(op).unwrap();

			header.hash()
		};

		// A sibling fork at number 3 (distinct extrinsics_root gives a distinct hash).
		let block3_fork = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, block2).unwrap();
			let header = Header {
				number: 3,
				parent_hash: block2,
				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
				digest: Default::default(),
				extrinsics_root: H256::from_low_u64_le(42),
			};

			op.set_block_data(
				header.clone(),
				Some(Vec::new()),
				None,
				None,
				NewBlockState::Normal,
				true,
			)
			.unwrap();

			backend.commit_operation(op).unwrap();

			header.hash()
		};

		// All five non-genesis blocks have state before the revert.
		assert!(backend.have_state_at(block1, 1));
		assert!(backend.have_state_at(block2, 2));
		assert!(backend.have_state_at(block3, 3));
		assert!(backend.have_state_at(block4, 4));
		assert!(backend.have_state_at(block3_fork, 3));

		assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]);
		assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0);

		// Revert 1 block from best; 3 blocks are reported reverted because the
		// non-best fork blocks above the new best are removed as well.
		assert_eq!(3, backend.revert(1, false).unwrap().0);

		assert!(backend.have_state_at(block1, 1));

		// A fully-pruned block has Unknown status and no BODY or HEADER entry
		// under its number+hash lookup key.
		let ensure_pruned = |hash, number: u32| {
			assert_eq!(
				backend.blockchain.status(hash).unwrap(),
				sc_client_api::blockchain::BlockStatus::Unknown
			);
			assert!(
				backend
					.blockchain
					.db
					.get(columns::BODY, &number_and_hash_to_lookup_key(number, hash).unwrap())
					.is_none(),
				"{number}"
			);
			assert!(
				backend
					.blockchain
					.db
					.get(columns::HEADER, &number_and_hash_to_lookup_key(number, hash).unwrap())
					.is_none(),
				"{number}"
			);
		};

		ensure_pruned(block2, 2);
		ensure_pruned(block3, 3);
		ensure_pruned(block4, 4);
		ensure_pruned(block3_fork, 3);

		// Only block 1 survives as a leaf.
		assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]);
		assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0);
	}
5533
5534	#[test]
5535	fn revert_finalized_blocks() {
5536		let pruning_modes = [BlocksPruning::Some(10), BlocksPruning::KeepAll];
5537
5538		// we will create a chain with 11 blocks, finalize block #8 and then
5539		// attempt to revert 5 blocks.
5540		for pruning_mode in pruning_modes {
5541			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);
5542
5543			let mut parent = Default::default();
5544			for i in 0..=10 {
5545				parent = insert_block(&backend, i, parent, None, Default::default(), vec![], None)
5546					.unwrap();
5547			}
5548
5549			assert_eq!(backend.blockchain().info().best_number, 10);
5550
5551			let block8 = backend.blockchain().hash(8).unwrap().unwrap();
5552			backend.finalize_block(block8, None).unwrap();
5553			backend.revert(5, true).unwrap();
5554
5555			match pruning_mode {
5556				// we can only revert to blocks for which we have state, if pruning is enabled
5557				// then the last state available will be that of the latest finalized block
5558				BlocksPruning::Some(_) => {
5559					assert_eq!(backend.blockchain().info().finalized_number, 8)
5560				},
5561				// otherwise if we're not doing state pruning we can revert past finalized blocks
5562				_ => assert_eq!(backend.blockchain().info().finalized_number, 5),
5563			}
5564		}
5565	}
5566
5567	#[test]
5568	fn test_no_duplicated_leaves_allowed() {
5569		let backend: Backend<Block> = Backend::new_test(10, 10);
5570		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
5571		let block1 = insert_header(&backend, 1, block0, None, Default::default());
5572		// Add block 2 not as the best block
5573		let block2 = insert_header_no_head(&backend, 2, block1, Default::default());
5574		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
5575		assert_eq!(backend.blockchain().info().best_hash, block1);
5576
5577		// Add block 2 as the best block
5578		let block2 = insert_header(&backend, 2, block1, None, Default::default());
5579		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
5580		assert_eq!(backend.blockchain().info().best_hash, block2);
5581	}
5582
5583	#[test]
5584	fn force_delayed_canonicalize_waiting_for_blocks_to_be_finalized() {
5585		let pruning_modes =
5586			[BlocksPruning::Some(10), BlocksPruning::KeepAll, BlocksPruning::KeepFinalized];
5587
5588		for pruning_mode in pruning_modes {
5589			eprintln!("Running with pruning mode: {:?}", pruning_mode);
5590
5591			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);
5592
5593			let genesis = insert_block(
5594				&backend,
5595				0,
5596				Default::default(),
5597				None,
5598				Default::default(),
5599				vec![],
5600				None,
5601			)
5602			.unwrap();
5603
5604			let block1 = {
5605				let mut op = backend.begin_operation().unwrap();
5606				backend.begin_state_operation(&mut op, genesis).unwrap();
5607				let mut header = Header {
5608					number: 1,
5609					parent_hash: genesis,
5610					state_root: Default::default(),
5611					digest: Default::default(),
5612					extrinsics_root: Default::default(),
5613				};
5614
5615				let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];
5616
5617				let (root, overlay) = op.old_state.storage_root(
5618					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
5619					StateVersion::V1,
5620				);
5621				op.update_db_storage(overlay).unwrap();
5622				header.state_root = root.into();
5623
5624				op.update_storage(storage, Vec::new()).unwrap();
5625
5626				op.set_block_data(
5627					header.clone(),
5628					Some(Vec::new()),
5629					None,
5630					None,
5631					NewBlockState::Normal,
5632					true,
5633				)
5634				.unwrap();
5635
5636				backend.commit_operation(op).unwrap();
5637
5638				header.hash()
5639			};
5640
5641			if matches!(pruning_mode, BlocksPruning::Some(_)) {
5642				assert_eq!(
5643					LastCanonicalized::Block(0),
5644					backend.storage.state_db.last_canonicalized()
5645				);
5646			}
5647
5648			// This should not trigger any forced canonicalization as we didn't have imported any
5649			// best block by now.
5650			let block2 = {
5651				let mut op = backend.begin_operation().unwrap();
5652				backend.begin_state_operation(&mut op, block1).unwrap();
5653				let mut header = Header {
5654					number: 2,
5655					parent_hash: block1,
5656					state_root: Default::default(),
5657					digest: Default::default(),
5658					extrinsics_root: Default::default(),
5659				};
5660
5661				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 2]))];
5662
5663				let (root, overlay) = op.old_state.storage_root(
5664					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
5665					StateVersion::V1,
5666				);
5667				op.update_db_storage(overlay).unwrap();
5668				header.state_root = root.into();
5669
5670				op.update_storage(storage, Vec::new()).unwrap();
5671
5672				op.set_block_data(
5673					header.clone(),
5674					Some(Vec::new()),
5675					None,
5676					None,
5677					NewBlockState::Normal,
5678					true,
5679				)
5680				.unwrap();
5681
5682				backend.commit_operation(op).unwrap();
5683
5684				header.hash()
5685			};
5686
5687			if matches!(pruning_mode, BlocksPruning::Some(_)) {
5688				assert_eq!(
5689					LastCanonicalized::Block(0),
5690					backend.storage.state_db.last_canonicalized()
5691				);
5692			}
5693
5694			// This should also not trigger it yet, because we import a best block, but the best
5695			// block from the POV of the db is still at `0`.
5696			let block3 = {
5697				let mut op = backend.begin_operation().unwrap();
5698				backend.begin_state_operation(&mut op, block2).unwrap();
5699				let mut header = Header {
5700					number: 3,
5701					parent_hash: block2,
5702					state_root: Default::default(),
5703					digest: Default::default(),
5704					extrinsics_root: Default::default(),
5705				};
5706
5707				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 3]))];
5708
5709				let (root, overlay) = op.old_state.storage_root(
5710					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
5711					StateVersion::V1,
5712				);
5713				op.update_db_storage(overlay).unwrap();
5714				header.state_root = root.into();
5715
5716				op.update_storage(storage, Vec::new()).unwrap();
5717
5718				op.set_block_data(
5719					header.clone(),
5720					Some(Vec::new()),
5721					None,
5722					None,
5723					NewBlockState::Best,
5724					true,
5725				)
5726				.unwrap();
5727
5728				backend.commit_operation(op).unwrap();
5729
5730				header.hash()
5731			};
5732
5733			// Now it should kick in.
5734			let block4 = {
5735				let mut op = backend.begin_operation().unwrap();
5736				backend.begin_state_operation(&mut op, block3).unwrap();
5737				let mut header = Header {
5738					number: 4,
5739					parent_hash: block3,
5740					state_root: Default::default(),
5741					digest: Default::default(),
5742					extrinsics_root: Default::default(),
5743				};
5744
5745				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 4]))];
5746
5747				let (root, overlay) = op.old_state.storage_root(
5748					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
5749					StateVersion::V1,
5750				);
5751				op.update_db_storage(overlay).unwrap();
5752				header.state_root = root.into();
5753
5754				op.update_storage(storage, Vec::new()).unwrap();
5755
5756				op.set_block_data(
5757					header.clone(),
5758					Some(Vec::new()),
5759					None,
5760					None,
5761					NewBlockState::Best,
5762					true,
5763				)
5764				.unwrap();
5765
5766				backend.commit_operation(op).unwrap();
5767
5768				header.hash()
5769			};
5770
5771			if matches!(pruning_mode, BlocksPruning::Some(_)) {
5772				assert_eq!(
5773					LastCanonicalized::Block(2),
5774					backend.storage.state_db.last_canonicalized()
5775				);
5776			}
5777
5778			assert_eq!(block1, backend.blockchain().hash(1).unwrap().unwrap());
5779			assert_eq!(block2, backend.blockchain().hash(2).unwrap().unwrap());
5780			assert_eq!(block3, backend.blockchain().hash(3).unwrap().unwrap());
5781			assert_eq!(block4, backend.blockchain().hash(4).unwrap().unwrap());
5782		}
5783	}
5784
	#[test]
	fn test_pinned_blocks_on_finalize() {
		// Pinned blocks must keep their bodies and justifications accessible (served
		// from the pinned-blocks cache) even after pruning, and the cached values
		// must be dropped once a block's pin count reaches zero.
		// Pruning window of 1 block.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Justification payload used for block `i`.
		let build_justification = |i: u64| ([0, 0, 0, 0], vec![i.try_into().unwrap()]);
		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4
		for i in 0..5 {
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);
			// Avoid block pruning.
			backend.pin_block(blocks[i as usize]).unwrap();

			prev_hash = hash;
		}

		let bc = backend.blockchain();

		// Check that we can properly access values when there is reference count
		// but no value.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);

		// Block 1 gets pinned three times in total: once in the loop above plus
		// twice here.
		backend.pin_block(blocks[1]).unwrap();
		backend.pin_block(blocks[1]).unwrap();

		// Finalize all blocks. This will trigger pruning.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
		for i in 1..5 {
			op.mark_finalized(blocks[i], Some(build_justification(i.try_into().unwrap())))
				.unwrap();
		}
		backend.commit_operation(op).unwrap();

		// Block 0, 1, 2, 3 are pinned, so all values should be cached.
		// Block 4 is inside the pruning window, its value is in db.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0.into(), ())]),
			bc.body(blocks[0]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(1))),
			bc.justifications(blocks[1]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
			bc.body(blocks[2]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(2))),
			bc.justifications(blocks[2]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
			bc.body(blocks[3]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(3))),
			bc.justifications(blocks[3]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);

		// Unpin all blocks. Values should be removed from cache.
		for block in &blocks {
			backend.unpin_block(*block);
		}

		assert!(bc.body(blocks[0]).unwrap().is_none());
		// Block 1 still holds two of its three pins, so we expect its values to
		// still be cached.
		assert!(bc.body(blocks[1]).unwrap().is_some());
		assert!(bc.justifications(blocks[1]).unwrap().is_some());
		// Headers should also be available while pinned
		assert!(bc.header(blocks[1]).ok().flatten().is_some());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.justifications(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());
		assert!(bc.justifications(blocks[3]).unwrap().is_none());

		// Two more unpins are needed before block 1 is evicted from the cache.
		backend.unpin_block(blocks[1]);
		// One pin is still held, so the values remain available.
		assert!(bc.body(blocks[1]).unwrap().is_some());
		assert!(bc.justifications(blocks[1]).unwrap().is_some());
		backend.unpin_block(blocks[1]);
		// Last pin released: the cached values are gone.
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.justifications(blocks[1]).unwrap().is_none());

		// Block 4 is inside the pruning window and still kept
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);

		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4 -> 5
		let hash = insert_block(
			&backend,
			5,
			prev_hash,
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(5.into(), ())],
			None,
		)
		.unwrap();
		blocks.push(hash);

		// Pin block 4 before it leaves the pruning window.
		backend.pin_block(blocks[4]).unwrap();
		// Mark block 5 as finalized.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[5]).unwrap();
		op.mark_finalized(blocks[5], Some(build_justification(5))).unwrap();
		backend.commit_operation(op).unwrap();

		// All earlier, now-unpinned blocks are gone; pinned block 4 and finalized
		// block 5 remain accessible.
		assert!(bc.body(blocks[0]).unwrap().is_none());
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(5.into(), ())]),
			bc.body(blocks[5]).unwrap()
		);
		assert!(bc.header(blocks[5]).ok().flatten().is_some());

		backend.unpin_block(blocks[4]);
		assert!(bc.body(blocks[4]).unwrap().is_none());
		assert!(bc.justifications(blocks[4]).unwrap().is_none());

		// Append a justification to block 5.
		backend.append_justification(blocks[5], ([0, 0, 0, 1], vec![42])).unwrap();

		let hash = insert_block(
			&backend,
			6,
			blocks[5],
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(6.into(), ())],
			None,
		)
		.unwrap();
		blocks.push(hash);

		// Pin block 5 so it gets loaded into the cache on prune
		backend.pin_block(blocks[5]).unwrap();

		// Finalize block 6 so block 5 gets pruned. Since it is pinned both justifications should be
		// in memory.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[6]).unwrap();
		op.mark_finalized(blocks[6], None).unwrap();
		backend.commit_operation(op).unwrap();

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(5.into(), ())]),
			bc.body(blocks[5]).unwrap()
		);
		assert!(bc.header(blocks[5]).ok().flatten().is_some());
		// Both the finalization justification and the appended one must survive.
		let mut expected = Justifications::from(build_justification(5));
		expected.append(([0, 0, 0, 1], vec![42]));
		assert_eq!(Some(expected), bc.justifications(blocks[5]).unwrap());
	}
5989
	#[test]
	fn test_pinned_blocks_on_finalize_with_fork() {
		// Pinned fork blocks must stay available from the pinned-blocks cache when
		// the fork is pruned during finalization; unpinned fork blocks are dropped.
		// Pruning window of 1 block.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4
		for i in 0..5 {
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);

			// Avoid block pruning.
			backend.pin_block(blocks[i as usize]).unwrap();

			prev_hash = hash;
		}

		// Insert a fork at the second block.
		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4
		//        \ -> 2 -> 3
		// Note: the fork root is intentionally not pinned.
		let fork_hash_root = insert_block(
			&backend,
			2,
			blocks[1],
			None,
			H256::random(),
			vec![UncheckedXt::new_transaction(2.into(), ())],
			None,
		)
		.unwrap();
		let fork_hash_3 = insert_block(
			&backend,
			3,
			fork_hash_root,
			None,
			H256::random(),
			vec![
				UncheckedXt::new_transaction(3.into(), ()),
				UncheckedXt::new_transaction(11.into(), ()),
			],
			None,
		)
		.unwrap();

		// Do not prune the fork hash.
		backend.pin_block(fork_hash_3).unwrap();

		// Mark the canonical chain tip as head before finalizing.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
		op.mark_head(blocks[4]).unwrap();
		backend.commit_operation(op).unwrap();

		// Finalize blocks 1..=4 one operation at a time; this triggers pruning.
		for i in 1..5 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
		}

		// All canonical blocks were pinned, so their bodies must still be readable.
		let bc = backend.blockchain();
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0.into(), ())]),
			bc.body(blocks[0]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
			bc.body(blocks[2]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
			bc.body(blocks[3]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		// Check the fork hashes.
		// The fork root was never pinned, so its body is no longer available.
		assert_eq!(None, bc.body(fork_hash_root).unwrap());
		assert_eq!(
			Some(vec![
				UncheckedXt::new_transaction(3.into(), ()),
				UncheckedXt::new_transaction(11.into(), ())
			]),
			bc.body(fork_hash_3).unwrap()
		);

		// Unpin all blocks, except the forked one.
		for block in &blocks {
			backend.unpin_block(*block);
		}
		assert!(bc.body(blocks[0]).unwrap().is_none());
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());

		// Dropping the last pin on the fork block removes its cached body.
		assert!(bc.body(fork_hash_3).unwrap().is_some());
		backend.unpin_block(fork_hash_3);
		assert!(bc.body(fork_hash_3).unwrap().is_none());
	}
6104
6105	#[test]
6106	fn prune_blocks_with_empty_predicates_prunes_all() {
6107		// Test backward compatibility: empty predicates means all blocks are pruned
6108		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
6109			BlocksPruning::Some(2),
6110			0,
6111			vec![], // Empty predicates
6112		);
6113
6114		let mut blocks = Vec::new();
6115		let mut prev_hash = Default::default();
6116
6117		// Create 5 blocks
6118		for i in 0..5 {
6119			let hash = insert_block(
6120				&backend,
6121				i,
6122				prev_hash,
6123				None,
6124				Default::default(),
6125				vec![UncheckedXt::new_transaction(i.into(), ())],
6126				None,
6127			)
6128			.unwrap();
6129			blocks.push(hash);
6130			prev_hash = hash;
6131		}
6132
6133		// Justification - but no predicate to preserve it
6134		let justification = (CONS0_ENGINE_ID, vec![1, 2, 3]);
6135
6136		// Finalize blocks, adding justification to block 1
6137		{
6138			let mut op = backend.begin_operation().unwrap();
6139			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
6140			op.mark_finalized(blocks[1], Some(justification.clone())).unwrap();
6141			op.mark_finalized(blocks[2], None).unwrap();
6142			op.mark_finalized(blocks[3], None).unwrap();
6143			op.mark_finalized(blocks[4], None).unwrap();
6144			backend.commit_operation(op).unwrap();
6145		}
6146
6147		let bc = backend.blockchain();
6148
6149		// All blocks outside pruning window should be pruned, even with justification
6150		assert_eq!(None, bc.body(blocks[0]).unwrap());
6151		assert_eq!(None, bc.body(blocks[1]).unwrap()); // Has justification but no predicate
6152		assert_eq!(None, bc.body(blocks[2]).unwrap());
6153
6154		// Blocks 3 and 4 are within the pruning window
6155		assert!(bc.body(blocks[3]).unwrap().is_some());
6156		assert!(bc.body(blocks[4]).unwrap().is_some());
6157	}
6158
6159	#[test]
6160	fn prune_blocks_multiple_filters_or_logic() {
6161		// Test that multiple filters use OR logic: if ANY filter matches, block is kept
6162		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
6163			BlocksPruning::Some(2),
6164			0,
6165			vec![
6166				Arc::new(|j: &Justifications| j.get(CONS0_ENGINE_ID).is_some()),
6167				Arc::new(|j: &Justifications| j.get(CONS1_ENGINE_ID).is_some()),
6168			],
6169		);
6170
6171		let mut blocks = Vec::new();
6172		let mut prev_hash = Default::default();
6173
6174		// Create 7 blocks
6175		for i in 0..7 {
6176			let hash = insert_block(
6177				&backend,
6178				i,
6179				prev_hash,
6180				None,
6181				Default::default(),
6182				vec![UncheckedXt::new_transaction(i.into(), ())],
6183				None,
6184			)
6185			.unwrap();
6186			blocks.push(hash);
6187			prev_hash = hash;
6188		}
6189
6190		let cons0_justification = (CONS0_ENGINE_ID, vec![1, 2, 3]);
6191		let cons1_justification = (CONS1_ENGINE_ID, vec![4, 5, 6]);
6192
6193		// Finalize blocks with different justification patterns
6194		{
6195			let mut op = backend.begin_operation().unwrap();
6196			backend.begin_state_operation(&mut op, blocks[6]).unwrap();
6197			// Block 1: CONS0 only - should be preserved
6198			op.mark_finalized(blocks[1], Some(cons0_justification.clone())).unwrap();
6199			// Block 2: CONS1 only - should be preserved
6200			op.mark_finalized(blocks[2], Some(cons1_justification.clone())).unwrap();
6201			// Block 3: No justification - should be pruned
6202			op.mark_finalized(blocks[3], None).unwrap();
6203			// Block 4: Random/unknown engine ID - should be pruned
6204			op.mark_finalized(blocks[4], Some(([9, 9, 9, 9], vec![7, 8, 9]))).unwrap();
6205			op.mark_finalized(blocks[5], None).unwrap();
6206			op.mark_finalized(blocks[6], None).unwrap();
6207			backend.commit_operation(op).unwrap();
6208		}
6209
6210		let bc = backend.blockchain();
6211
6212		// Block 0 should be pruned (outside window, no justification)
6213		assert_eq!(None, bc.body(blocks[0]).unwrap());
6214
6215		// Block 1 should be preserved (has CONS0 justification)
6216		assert!(bc.body(blocks[1]).unwrap().is_some());
6217
6218		// Block 2 should be preserved (has CONS1 justification)
6219		assert!(bc.body(blocks[2]).unwrap().is_some());
6220
6221		// Block 3 should be pruned (no justification)
6222		assert_eq!(None, bc.body(blocks[3]).unwrap());
6223
6224		// Block 4 should be pruned (unknown engine ID)
6225		assert_eq!(None, bc.body(blocks[4]).unwrap());
6226
6227		// Blocks 5 and 6 are within the pruning window
6228		assert!(bc.body(blocks[5]).unwrap().is_some());
6229		assert!(bc.body(blocks[6]).unwrap().is_some());
6230	}
6231
6232	#[test]
6233	fn prune_blocks_filter_only_matches_specific_engine() {
6234		// Test that a filter for one engine ID does NOT preserve blocks with a different engine ID
6235		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
6236			BlocksPruning::Some(2),
6237			0,
6238			vec![Arc::new(|j: &Justifications| j.get(CONS0_ENGINE_ID).is_some())],
6239		);
6240
6241		let mut blocks = Vec::new();
6242		let mut prev_hash = Default::default();
6243
6244		// Create 5 blocks
6245		for i in 0..5 {
6246			let hash = insert_block(
6247				&backend,
6248				i,
6249				prev_hash,
6250				None,
6251				Default::default(),
6252				vec![UncheckedXt::new_transaction(i.into(), ())],
6253				None,
6254			)
6255			.unwrap();
6256			blocks.push(hash);
6257			prev_hash = hash;
6258		}
6259
6260		let cons1_justification = (CONS1_ENGINE_ID, vec![4, 5, 6]);
6261
6262		// Finalize blocks, adding CONS1 justification to block 1
6263		{
6264			let mut op = backend.begin_operation().unwrap();
6265			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
6266			// Block 1 gets CONS1 justification - should NOT be preserved by CONS0 filter
6267			op.mark_finalized(blocks[1], Some(cons1_justification.clone())).unwrap();
6268			op.mark_finalized(blocks[2], None).unwrap();
6269			op.mark_finalized(blocks[3], None).unwrap();
6270			op.mark_finalized(blocks[4], None).unwrap();
6271			backend.commit_operation(op).unwrap();
6272		}
6273
6274		let bc = backend.blockchain();
6275
6276		// Block 0 should be pruned
6277		assert_eq!(None, bc.body(blocks[0]).unwrap());
6278
6279		// Block 1 should also be pruned (CONS1 justification, but only CONS0 filter)
6280		assert_eq!(None, bc.body(blocks[1]).unwrap());
6281
6282		// Block 2 should be pruned
6283		assert_eq!(None, bc.body(blocks[2]).unwrap());
6284
6285		// Blocks 3 and 4 are within the pruning window
6286		assert!(bc.body(blocks[3]).unwrap().is_some());
6287		assert!(bc.body(blocks[4]).unwrap().is_some());
6288	}
6289
6290	/// Insert a header without body as best block. This triggers `MissingBody` gap creation
6291	/// when the parent header exists and `create_gap` is true.
6292	fn insert_header_no_body_as_best(
6293		backend: &Backend<Block>,
6294		number: u64,
6295		parent_hash: H256,
6296	) -> H256 {
6297		use sp_runtime::testing::Digest;
6298
6299		let digest = Digest::default();
6300		let header = Header {
6301			number,
6302			parent_hash,
6303			state_root: Default::default(),
6304			digest,
6305			extrinsics_root: Default::default(),
6306		};
6307
6308		let mut op = backend.begin_operation().unwrap();
6309		// body = None triggers MissingBody gap when parent exists
6310		op.set_block_data(header.clone(), None, None, None, NewBlockState::Best, true)
6311			.unwrap();
6312		backend.commit_operation(op).unwrap();
6313
6314		header.hash()
6315	}
6316
6317	/// Re-open a backend from an existing database with the given blocks pruning mode.
6318	fn reopen_backend(
6319		db: Arc<dyn sp_database::Database<DbHash>>,
6320		blocks_pruning: BlocksPruning,
6321	) -> Backend<Block> {
6322		let state_pruning = match blocks_pruning {
6323			BlocksPruning::KeepAll => PruningMode::ArchiveAll,
6324			BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical,
6325			BlocksPruning::Some(n) => PruningMode::blocks_pruning(n),
6326		};
6327		Backend::<Block>::new(
6328			DatabaseSettings {
6329				trie_cache_maximum_size: Some(16 * 1024 * 1024),
6330				state_pruning: Some(state_pruning),
6331				source: DatabaseSource::Custom { db, require_create_flag: false },
6332				blocks_pruning,
6333				pruning_filters: Default::default(),
6334				metrics_registry: None,
6335			},
6336			0,
6337		)
6338		.unwrap()
6339	}
6340
6341	#[test]
6342	fn missing_body_gap_is_removed_for_non_archive_node() {
6343		// Create a non-archive backend and produce a multi-block MissingBody gap.
6344		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(100), 0);
6345		assert!(!backend.is_archive);
6346
6347		let genesis_hash = insert_header(&backend, 0, Default::default(), None, Default::default());
6348
6349		// Insert blocks 1..3 without bodies — creates a MissingBody gap spanning blocks 1 to 3.
6350		let hash_1 = insert_header_no_body_as_best(&backend, 1, genesis_hash);
6351		let hash_2 = insert_header_no_body_as_best(&backend, 2, hash_1);
6352		insert_header_no_body_as_best(&backend, 3, hash_2);
6353
6354		let info = backend.blockchain().info();
6355		assert!(info.block_gap.is_some(), "MissingBody gap should have been created");
6356		let gap = info.block_gap.unwrap();
6357		assert!(matches!(gap.gap_type, BlockGapType::MissingBody));
6358		assert_eq!(gap.start, 1);
6359		assert_eq!(gap.end, 3);
6360
6361		// Re-open the same database as a non-archive node.
6362		let db = backend.storage.db.clone();
6363		let backend = reopen_backend(db, BlocksPruning::Some(100));
6364		assert!(!backend.is_archive);
6365
6366		// The multi-block gap should have been removed on re-open.
6367		let info = backend.blockchain().info();
6368		assert!(
6369			info.block_gap.is_none(),
6370			"MissingBody gap should be removed for non-archive nodes, got: {:?}",
6371			info.block_gap,
6372		);
6373	}
6374
6375	#[test]
6376	fn missing_body_gap_is_preserved_for_archive_node() {
6377		// Create a backend with archive pruning and produce a multi-block MissingBody gap.
6378		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::KeepAll, 0);
6379		assert!(backend.is_archive);
6380
6381		let genesis_hash = insert_header(&backend, 0, Default::default(), None, Default::default());
6382
6383		// Insert blocks 1..3 without bodies — creates a MissingBody gap spanning blocks 1 to 3.
6384		let hash_1 = insert_header_no_body_as_best(&backend, 1, genesis_hash);
6385		let hash_2 = insert_header_no_body_as_best(&backend, 2, hash_1);
6386		insert_header_no_body_as_best(&backend, 3, hash_2);
6387
6388		let info = backend.blockchain().info();
6389		assert!(info.block_gap.is_some(), "MissingBody gap should have been created");
6390		let gap = info.block_gap.unwrap();
6391		assert!(matches!(gap.gap_type, BlockGapType::MissingBody));
6392		assert_eq!(gap.start, 1);
6393		assert_eq!(gap.end, 3);
6394
6395		// Re-open the same database as an archive node.
6396		let db = backend.storage.db.clone();
6397		let backend = reopen_backend(db, BlocksPruning::KeepAll);
6398		assert!(backend.is_archive);
6399
6400		// The gap should be preserved for archive nodes.
6401		let info = backend.blockchain().info();
6402		assert!(info.block_gap.is_some(), "MissingBody gap should be preserved for archive nodes",);
6403		let gap = info.block_gap.unwrap();
6404		assert!(matches!(gap.gap_type, BlockGapType::MissingBody));
6405		assert_eq!(gap.start, 1);
6406		assert_eq!(gap.end, 3);
6407	}
6408
6409	#[test]
6410	fn missing_header_and_body_gap_is_preserved_for_non_archive_node() {
6411		// Create a non-archive backend and produce a MissingHeaderAndBody gap (from warp sync).
6412		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(100), 0);
6413		assert!(!backend.is_archive);
6414
6415		let _genesis_hash =
6416			insert_header(&backend, 0, Default::default(), None, Default::default());
6417
6418		// Insert a disconnected block at height 3 with a fake parent to create a
6419		// MissingHeaderAndBody gap (blocks 1..2 are missing).
6420		insert_disconnected_header(&backend, 3, H256::from([200; 32]), Default::default(), true);
6421
6422		let info = backend.blockchain().info();
6423		assert!(info.block_gap.is_some(), "Gap should have been created");
6424		let gap = info.block_gap.unwrap();
6425		assert!(matches!(gap.gap_type, BlockGapType::MissingHeaderAndBody));
6426		assert_eq!(gap.start, 1);
6427		assert_eq!(gap.end, 2);
6428
6429		// Re-open the same database as a non-archive node.
6430		let db = backend.storage.db.clone();
6431		let backend = reopen_backend(db, BlocksPruning::Some(100));
6432		assert!(!backend.is_archive);
6433
6434		// The MissingHeaderAndBody gap should NOT be removed — only MissingBody gaps are removed.
6435		let info = backend.blockchain().info();
6436		assert!(
6437			info.block_gap.is_some(),
6438			"MissingHeaderAndBody gap should be preserved for non-archive nodes",
6439		);
6440		let gap = info.block_gap.unwrap();
6441		assert!(matches!(gap.gap_type, BlockGapType::MissingHeaderAndBody));
6442		assert_eq!(gap.start, 1);
6443		assert_eq!(gap.end, 2);
6444	}
6445}