// sc_client_db/lib.rs
1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! Client backend that is backed by a database.
20//!
21//! # Canonicality vs. Finality
22//!
23//! Finality indicates that a block will not be reverted, according to the consensus algorithm,
24//! while canonicality indicates that the block may be reverted, but we will be unable to do so,
25//! having discarded heavy state that will allow a chain reorganization.
26//!
27//! Finality implies canonicality but not vice-versa.
28
29#![warn(missing_docs)]
30
31pub mod offchain;
32
33pub mod bench;
34
35mod children;
36mod parity_db;
37mod pinned_blocks_cache;
38mod record_stats_state;
39mod stats;
40#[cfg(any(feature = "rocksdb", test))]
41mod upgrade;
42mod utils;
43
44use linked_hash_map::LinkedHashMap;
45use log::{debug, trace, warn};
46use parking_lot::{Mutex, RwLock};
47use prometheus_endpoint::Registry;
48use std::{
49	collections::{BTreeSet, HashMap, HashSet},
50	io,
51	path::{Path, PathBuf},
52	sync::Arc,
53};
54
55use crate::{
56	pinned_blocks_cache::PinnedBlocksCache,
57	record_stats_state::RecordStatsState,
58	stats::StateUsageStats,
59	utils::{meta_keys, read_db, read_meta, remove_from_db, DatabaseType, Meta},
60};
61use codec::{Decode, Encode};
62use hash_db::Prefix;
63use sc_client_api::{
64	backend::NewBlockState,
65	blockchain::{BlockGap, BlockGapType},
66	leaves::{FinalizationOutcome, LeafSet},
67	utils::is_descendent_of,
68	IoInfo, MemoryInfo, MemorySize, TrieCacheContext, UsageInfo,
69};
70use sc_state_db::{IsPruned, LastCanonicalized, StateDb};
71use sp_arithmetic::traits::Saturating;
72use sp_blockchain::{
73	Backend as _, CachedHeaderMetadata, DisplacedLeavesAfterFinalization, Error as ClientError,
74	HeaderBackend, HeaderMetadata, HeaderMetadataCache, Result as ClientResult,
75};
76use sp_core::{
77	offchain::OffchainOverlayedChange,
78	storage::{well_known_keys, ChildInfo},
79};
80use sp_database::Transaction;
81use sp_runtime::{
82	generic::BlockId,
83	traits::{
84		Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One, SaturatedConversion,
85		Zero,
86	},
87	Justification, Justifications, StateVersion, Storage,
88};
89use sp_state_machine::{
90	backend::{AsTrieBackend, Backend as StateBackend},
91	BackendTransaction, ChildStorageCollection, DBValue, IndexOperation, IterArgs,
92	OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, StorageKey,
93	StorageValue, UsageInfo as StateUsageInfo,
94};
95use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB};
96use utils::BLOCK_GAP_CURRENT_VERSION;
97
98// Re-export the Database trait so that one can pass an implementation of it.
99pub use sc_state_db::PruningMode;
100pub use sp_database::Database;
101
102pub use bench::BenchmarkingState;
103
/// Filter to determine if a block should be excluded from pruning.
///
/// Note: This filter only affects **block body** (and future header) pruning.
/// It does **not** affect state pruning, which is configured separately.
///
/// Implementations must be thread-safe (`Send + Sync`), since the filter is
/// shared behind an `Arc` in [`DatabaseSettings::pruning_filters`].
pub trait PruningFilter: Send + Sync {
	/// Check if a block with the given justifications should be preserved.
	///
	/// Returns `true` to preserve the block, `false` to allow pruning.
	fn should_retain(&self, justifications: &Justifications) -> bool;
}
114
115impl<F> PruningFilter for F
116where
117	F: Fn(&Justifications) -> bool + Send + Sync,
118{
119	fn should_retain(&self, justifications: &Justifications) -> bool {
120		(self)(justifications)
121	}
122}
123
/// Number of recently-used headers kept in the in-memory header cache
/// of [`BlockchainDb`] (see `cache_header`).
const CACHE_HEADERS: usize = 8;

/// DB-backed patricia trie state, transaction type is an overlay of changes to commit.
pub type DbState<H> = sp_state_machine::TrieBackend<Arc<dyn sp_state_machine::Storage<H>>, H>;

/// Builder for [`DbState`].
pub type DbStateBuilder<Hasher> =
	sp_state_machine::TrieBackendBuilder<Arc<dyn sp_state_machine::Storage<Hasher>>, Hasher>;

/// Length of a [`DbHash`] in bytes.
const DB_HASH_LEN: usize = 32;

/// Hash type that this backend uses for the database.
pub type DbHash = sp_core::H256;
138
/// An extrinsic entry in the database.
///
/// Bodies are stored either as a plain encoded `Vec<Extrinsic>` (`BODY` column)
/// or as a list of these entries (`BODY_INDEX` column) when some extrinsics
/// carry indexed data stored separately in the `TRANSACTION` column.
#[derive(Debug, Encode, Decode)]
enum DbExtrinsic<B: BlockT> {
	/// Extrinsic that contains indexed data.
	Indexed {
		/// Hash of the indexed part (key into the `TRANSACTION` column).
		hash: DbHash,
		/// Extrinsic header; joined with the indexed payload on read.
		header: Vec<u8>,
	},
	/// Complete extrinsic data.
	Full(B::Extrinsic),
	/// Extrinsic that renews multiple indexed data items within a single call.
	/// Used by bulk-renewal inherents (e.g. `process_auto_renewals`) where one extrinsic
	/// references multiple previously-stored data blobs.
	MultiRenew {
		/// Hashes of all renewed indexed data items.
		hashes: BTreeSet<DbHash>,
		/// The full encoded extrinsic (used to reconstruct the block body).
		extrinsic: Vec<u8>,
	},
}
161
/// A reference tracking state.
///
/// It makes sure that the hash we are using stays pinned in storage
/// until this structure is dropped.
pub struct RefTrackingState<Block: BlockT> {
	// The wrapped trie-backed state; all `StateBackend` calls delegate here.
	state: DbState<HashingFor<Block>>,
	// Storage backend; used on drop to unpin `parent_hash`.
	storage: Arc<StorageDb<Block>>,
	// Hash kept pinned in `storage` for the lifetime of this value, if any.
	parent_hash: Option<Block::Hash>,
}
171
172impl<B: BlockT> RefTrackingState<B> {
173	fn new(
174		state: DbState<HashingFor<B>>,
175		storage: Arc<StorageDb<B>>,
176		parent_hash: Option<B::Hash>,
177	) -> Self {
178		RefTrackingState { state, parent_hash, storage }
179	}
180}
181
182impl<B: BlockT> Drop for RefTrackingState<B> {
183	fn drop(&mut self) {
184		if let Some(hash) = &self.parent_hash {
185			self.storage.state_db.unpin(hash);
186		}
187	}
188}
189
190impl<Block: BlockT> std::fmt::Debug for RefTrackingState<Block> {
191	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
192		write!(f, "Block {:?}", self.parent_hash)
193	}
194}
195
/// A raw iterator over the `RefTrackingState`.
pub struct RawIter<B: BlockT> {
	// Underlying iterator of the wrapped `DbState`; the owning backend is
	// supplied again on every `next_*` call (see the `StorageIterator` impl).
	inner: <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::RawIter,
}
200
// Forwards every iterator operation to the inner `DbState` iterator, handing it
// the wrapped state extracted from the `RefTrackingState` backend.
impl<B: BlockT> StorageIterator<HashingFor<B>> for RawIter<B> {
	type Backend = RefTrackingState<B>;
	type Error = <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::Error;

	fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
		self.inner.next_key(&backend.state)
	}

	fn next_pair(
		&mut self,
		backend: &Self::Backend,
	) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
		self.inner.next_pair(&backend.state)
	}

	fn was_complete(&self) -> bool {
		self.inner.was_complete()
	}
}
220
// Every `StateBackend` operation forwards verbatim to the wrapped `DbState`;
// `RefTrackingState` only adds the pin/unpin bookkeeping performed in `Drop`.
impl<B: BlockT> StateBackend<HashingFor<B>> for RefTrackingState<B> {
	type Error = <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::Error;
	type TrieBackendStorage =
		<DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
	type RawIter = RawIter<B>;

	fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.storage(key)
	}

	fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
		self.state.storage_hash(key)
	}

	fn child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.child_storage(child_info, key)
	}

	fn child_storage_hash(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<B::Hash>, Self::Error> {
		self.state.child_storage_hash(child_info, key)
	}

	fn closest_merkle_value(
		&self,
		key: &[u8],
	) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
		self.state.closest_merkle_value(key)
	}

	fn child_closest_merkle_value(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
		self.state.child_closest_merkle_value(child_info, key)
	}

	fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
		self.state.exists_storage(key)
	}

	fn exists_child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<bool, Self::Error> {
		self.state.exists_child_storage(child_info, key)
	}

	fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_storage_key(key)
	}

	fn next_child_storage_key(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_child_storage_key(child_info, key)
	}

	fn storage_root<'a>(
		&self,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
		state_version: StateVersion,
	) -> (B::Hash, BackendTransaction<HashingFor<B>>) {
		self.state.storage_root(delta, state_version)
	}

	fn child_storage_root<'a>(
		&self,
		child_info: &ChildInfo,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
		state_version: StateVersion,
	) -> (B::Hash, bool, BackendTransaction<HashingFor<B>>) {
		self.state.child_storage_root(child_info, delta, state_version)
	}

	fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
		// Wrap the inner iterator so `RawIter` can re-supply `&self.state`
		// on every step (see `StorageIterator for RawIter`).
		self.state.raw_iter(args).map(|inner| RawIter { inner })
	}

	fn register_overlay_stats(&self, stats: &StateMachineStats) {
		self.state.register_overlay_stats(stats);
	}

	fn usage_info(&self) -> StateUsageInfo {
		self.state.usage_info()
	}
}
319
320impl<B: BlockT> AsTrieBackend<HashingFor<B>> for RefTrackingState<B> {
321	type TrieBackendStorage =
322		<DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
323
324	fn as_trie_backend(
325		&self,
326	) -> &sp_state_machine::TrieBackend<Self::TrieBackendStorage, HashingFor<B>> {
327		&self.state.as_trie_backend()
328	}
329}
330
/// Database settings.
pub struct DatabaseSettings {
	/// The maximum trie cache size in bytes.
	///
	/// If `None` is given, the cache is disabled.
	pub trie_cache_maximum_size: Option<usize>,
	/// Requested state pruning mode.
	pub state_pruning: Option<PruningMode>,
	/// Where to find the database.
	pub source: DatabaseSource,
	/// Block pruning mode.
	///
	/// NOTE: only finalized blocks are subject for removal!
	pub blocks_pruning: BlocksPruning,
	/// Filters to exclude blocks from pruning.
	///
	/// If any filter returns `true` for a block's justifications, the block body
	/// (and in the future, the header) will be preserved even when it falls
	/// outside the pruning window. Does not affect state pruning.
	pub pruning_filters: Vec<Arc<dyn PruningFilter>>,
	/// Prometheus metrics registry.
	pub metrics_registry: Option<Registry>,
}
354
/// Block pruning settings.
///
/// Controls removal of finalized block *bodies*; see [`DatabaseSettings`].
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BlocksPruning {
	/// Keep full block history, of every block that was ever imported.
	KeepAll,
	/// Keep full finalized block history.
	KeepFinalized,
	/// Keep N recent finalized blocks.
	Some(u32),
}
365
366impl BlocksPruning {
367	/// True if this is an archive pruning mode (either KeepAll or KeepFinalized).
368	pub fn is_archive(&self) -> bool {
369		match *self {
370			BlocksPruning::KeepAll | BlocksPruning::KeepFinalized => true,
371			BlocksPruning::Some(_) => false,
372		}
373	}
374}
375
/// Where to find the database.
#[derive(Debug, Clone)]
pub enum DatabaseSource {
	/// Check given path, and see if there is an existing database there. If it's either `RocksDb`
	/// or `ParityDb`, use it. If there is none, create a new instance of `ParityDb`.
	Auto {
		/// Path to the paritydb database.
		paritydb_path: PathBuf,
		/// Path to the rocksdb database.
		rocksdb_path: PathBuf,
		/// Cache size in MiB. Used only by `RocksDb` variant of `DatabaseSource`.
		cache_size: usize,
	},
	/// Load a RocksDB database from a given path. Recommended for most uses.
	#[cfg(feature = "rocksdb")]
	RocksDb {
		/// Path to the database.
		path: PathBuf,
		/// Cache size in MiB.
		cache_size: usize,
	},

	/// Load a ParityDb database from a given path.
	ParityDb {
		/// Path to the database.
		path: PathBuf,
	},

	/// Use a custom already-open database.
	Custom {
		/// The handle to the custom storage.
		db: Arc<dyn Database<DbHash>>,

		/// If set, the `create` flag will be required to open such datasource.
		require_create_flag: bool,
	},
}
413
414impl DatabaseSource {
415	/// Return path for databases that are stored on disk.
416	pub fn path(&self) -> Option<&Path> {
417		match self {
418			// as per https://github.com/paritytech/substrate/pull/9500#discussion_r684312550
419			//
420			// IIUC this is needed for polkadot to create its own dbs, so until it can use parity db
421			// I would think rocksdb, but later parity-db.
422			DatabaseSource::Auto { paritydb_path, .. } => Some(paritydb_path),
423			#[cfg(feature = "rocksdb")]
424			DatabaseSource::RocksDb { path, .. } => Some(path),
425			DatabaseSource::ParityDb { path } => Some(path),
426			DatabaseSource::Custom { .. } => None,
427		}
428	}
429
430	/// Set path for databases that are stored on disk.
431	pub fn set_path(&mut self, p: &Path) -> bool {
432		match self {
433			DatabaseSource::Auto { ref mut paritydb_path, .. } => {
434				*paritydb_path = p.into();
435				true
436			},
437			#[cfg(feature = "rocksdb")]
438			DatabaseSource::RocksDb { ref mut path, .. } => {
439				*path = p.into();
440				true
441			},
442			DatabaseSource::ParityDb { ref mut path } => {
443				*path = p.into();
444				true
445			},
446			DatabaseSource::Custom { .. } => false,
447		}
448	}
449}
450
451impl std::fmt::Display for DatabaseSource {
452	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
453		let name = match self {
454			DatabaseSource::Auto { .. } => "Auto",
455			#[cfg(feature = "rocksdb")]
456			DatabaseSource::RocksDb { .. } => "RocksDb",
457			DatabaseSource::ParityDb { .. } => "ParityDb",
458			DatabaseSource::Custom { .. } => "Custom",
459		};
460		write!(f, "{}", name)
461	}
462}
463
/// Column family indices used by this backend's database.
pub(crate) mod columns {
	pub const META: u32 = crate::utils::COLUMN_META;
	pub const STATE: u32 = 1;
	pub const STATE_META: u32 = 2;
	/// maps hashes to lookup keys and numbers to canon hashes.
	pub const KEY_LOOKUP: u32 = 3;
	pub const HEADER: u32 = 4;
	pub const BODY: u32 = 5;
	pub const JUSTIFICATIONS: u32 = 6;
	// NOTE(review): ids 7 and 10 are unused here — presumably retired columns;
	// confirm against the on-disk layout before reusing them.
	pub const AUX: u32 = 8;
	/// Offchain workers local storage
	pub const OFFCHAIN: u32 = 9;
	/// Transactions
	pub const TRANSACTION: u32 = 11;
	pub const BODY_INDEX: u32 = 12;
}
480
/// A fully-assembled block staged for import by a `BlockImportOperation`.
struct PendingBlock<Block: BlockT> {
	header: Block::Header,
	justifications: Option<Justifications>,
	body: Option<Vec<Block::Extrinsic>>,
	// Indexed payloads accompanying the body, if any.
	indexed_body: Option<Vec<Vec<u8>>>,
	// Best/final/normal state the block should assume after import.
	leaf_state: NewBlockState,
	// Whether the block should be registered in the leaf set.
	register_as_leaf: bool,
}
489
// Wrapper implementing the `MetaDb` trait required by `sc_state_db`,
// reading metadata from the `STATE_META` column.
#[derive(Clone)]
struct StateMetaDb(Arc<dyn Database<DbHash>>);

impl sc_state_db::MetaDb for StateMetaDb {
	type Error = sp_database::error::DatabaseError;

	fn get_meta(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		// `Database::get` returns an `Option` directly, so the lookup itself
		// is infallible here — hence the plain `Ok`.
		Ok(self.0.get(columns::STATE_META, key))
	}
}
501
/// A pending update to the cached blockchain `Meta` information
/// (applied by `BlockchainDb::update_meta`).
struct MetaUpdate<Block: BlockT> {
	pub hash: Block::Hash,
	pub number: NumberFor<Block>,
	// Whether `hash` becomes the new best block.
	pub is_best: bool,
	// Whether `hash` becomes the new finalized block.
	pub is_finalized: bool,
	// Whether the finalized block's state is available (only read if finalized).
	pub with_state: bool,
}
509
/// Insert `header` into the bounded header cache, evicting the oldest entries
/// (front of the `LinkedHashMap`) until at most `CACHE_HEADERS` remain.
/// A `None` header is cached too, so repeated misses skip the database.
fn cache_header<Hash: std::cmp::Eq + std::hash::Hash, Header>(
	cache: &mut LinkedHashMap<Hash, Option<Header>>,
	hash: Hash,
	header: Option<Header>,
) {
	cache.insert(hash, header);
	while cache.len() > CACHE_HEADERS {
		cache.pop_front();
	}
}
520
/// Block database
pub struct BlockchainDb<Block: BlockT> {
	// Handle to the underlying key/value database.
	db: Arc<dyn Database<DbHash>>,
	// Cached chain metadata (best/finalized/genesis hashes, block gap).
	meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
	// Chain leaf set, persisted under `meta_keys::LEAF_PREFIX`.
	leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
	// Cache of derived per-header metadata.
	header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
	// Small bounded cache of recently accessed headers (see `CACHE_HEADERS`).
	header_cache: Mutex<LinkedHashMap<Block::Hash, Option<Block::Header>>>,
	// Bodies/justifications kept available for pinned blocks.
	pinned_blocks_cache: Arc<RwLock<PinnedBlocksCache<Block>>>,
}
530
531impl<Block: BlockT> BlockchainDb<Block> {
532	fn new(db: Arc<dyn Database<DbHash>>) -> ClientResult<Self> {
533		let meta = read_meta::<Block>(&*db, columns::HEADER)?;
534		let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
535		Ok(BlockchainDb {
536			db,
537			leaves: RwLock::new(leaves),
538			meta: Arc::new(RwLock::new(meta)),
539			header_metadata_cache: Arc::new(HeaderMetadataCache::default()),
540			header_cache: Default::default(),
541			pinned_blocks_cache: Arc::new(RwLock::new(PinnedBlocksCache::new())),
542		})
543	}
544
545	fn update_meta(&self, update: MetaUpdate<Block>) {
546		let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update;
547		let mut meta = self.meta.write();
548		if number.is_zero() {
549			meta.genesis_hash = hash;
550		}
551
552		if is_best {
553			meta.best_number = number;
554			meta.best_hash = hash;
555		}
556
557		if is_finalized {
558			if with_state {
559				meta.finalized_state = Some((hash, number));
560			}
561			meta.finalized_number = number;
562			meta.finalized_hash = hash;
563		}
564	}
565
566	fn update_block_gap(&self, gap: Option<BlockGap<NumberFor<Block>>>) {
567		let mut meta = self.meta.write();
568		meta.block_gap = gap;
569	}
570
571	/// Empty the cache of pinned items.
572	fn clear_pinning_cache(&self) {
573		self.pinned_blocks_cache.write().clear();
574	}
575
576	/// Load a justification into the cache of pinned items.
577	/// Reference count of the item will not be increased. Use this
578	/// to load values for items into the cache which have already been pinned.
579	fn insert_justifications_if_pinned(&self, hash: Block::Hash, justification: Justification) {
580		let mut cache = self.pinned_blocks_cache.write();
581		if !cache.contains(hash) {
582			return;
583		}
584
585		let justifications = Justifications::from(justification);
586		cache.insert_justifications(hash, Some(justifications));
587	}
588
589	/// Load a justification from the db into the cache of pinned items.
590	/// Reference count of the item will not be increased. Use this
591	/// to load values for items into the cache which have already been pinned.
592	fn insert_persisted_justifications_if_pinned(&self, hash: Block::Hash) -> ClientResult<()> {
593		let mut cache = self.pinned_blocks_cache.write();
594		if !cache.contains(hash) {
595			return Ok(());
596		}
597
598		let justifications = self.justifications_uncached(hash)?;
599		cache.insert_justifications(hash, justifications);
600		Ok(())
601	}
602
603	/// Load a block body from the db into the cache of pinned items.
604	/// Reference count of the item will not be increased. Use this
605	/// to load values for items items into the cache which have already been pinned.
606	fn insert_persisted_body_if_pinned(&self, hash: Block::Hash) -> ClientResult<()> {
607		let mut cache = self.pinned_blocks_cache.write();
608		if !cache.contains(hash) {
609			return Ok(());
610		}
611
612		let body = self.body_uncached(hash)?;
613		cache.insert_body(hash, body);
614		Ok(())
615	}
616
617	/// Bump reference count for pinned item.
618	fn bump_ref(&self, hash: Block::Hash) {
619		self.pinned_blocks_cache.write().pin(hash);
620	}
621
622	/// Decrease reference count for pinned item and remove if reference count is 0.
623	fn unpin(&self, hash: Block::Hash) {
624		self.pinned_blocks_cache.write().unpin(hash);
625	}
626
627	fn justifications_uncached(&self, hash: Block::Hash) -> ClientResult<Option<Justifications>> {
628		match read_db(
629			&*self.db,
630			columns::KEY_LOOKUP,
631			columns::JUSTIFICATIONS,
632			BlockId::<Block>::Hash(hash),
633		)? {
634			Some(justifications) => match Decode::decode(&mut &justifications[..]) {
635				Ok(justifications) => Ok(Some(justifications)),
636				Err(err) => {
637					return Err(sp_blockchain::Error::Backend(format!(
638						"Error decoding justifications: {err}"
639					)))
640				},
641			},
642			None => Ok(None),
643		}
644	}
645
	/// Read and reconstruct a block body straight from the database, bypassing
	/// the pinned-blocks cache.
	///
	/// A body is stored either plainly in `BODY`, or as a `Vec<DbExtrinsic>`
	/// index in `BODY_INDEX` whose `Indexed` entries are re-joined with their
	/// payloads from the `TRANSACTION` column. Returns `Ok(None)` when neither
	/// column has an entry for `hash`.
	fn body_uncached(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
		if let Some(body) =
			read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::<Block>(hash))?
		{
			// Plain body
			match Decode::decode(&mut &body[..]) {
				Ok(body) => return Ok(Some(body)),
				Err(err) => {
					return Err(sp_blockchain::Error::Backend(format!(
						"Error decoding body: {err}"
					)))
				},
			}
		}

		if let Some(index) = read_db(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::BODY_INDEX,
			BlockId::Hash::<Block>(hash),
		)? {
			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
				Ok(index) => {
					let mut body = Vec::new();
					for ex in index {
						match ex {
							DbExtrinsic::Indexed { hash, header } => {
								// Re-join the extrinsic header with its indexed payload.
								match self.db.get(columns::TRANSACTION, hash.as_ref()) {
									Some(t) => {
										let mut input =
											utils::join_input(header.as_ref(), t.as_ref());
										let ex = Block::Extrinsic::decode(&mut input).map_err(
											|err| {
												sp_blockchain::Error::Backend(format!(
													"Error decoding indexed extrinsic: {err}"
												))
											},
										)?;
										body.push(ex);
									},
									None => {
										return Err(sp_blockchain::Error::Backend(format!(
											"Missing indexed transaction {hash:?}"
										)))
									},
								};
							},
							DbExtrinsic::Full(ex) => {
								body.push(ex);
							},
							DbExtrinsic::MultiRenew { extrinsic, .. } => {
								// Multi-renewal extrinsic: the `extrinsic` field already
								// holds the complete encoding, so there is no indexed
								// payload to join back in.
								let ex = Block::Extrinsic::decode(&mut &extrinsic[..]).map_err(
									|err| {
										sp_blockchain::Error::Backend(format!(
											"Error decoding multi-renew extrinsic: {err}"
										))
									},
								)?;
								body.push(ex);
							},
						}
					}
					return Ok(Some(body));
				},
				Err(err) => {
					return Err(sp_blockchain::Error::Backend(format!(
						"Error decoding body list: {err}",
					)))
				},
			}
		}
		Ok(None)
	}
721
722	fn block_indexed_hashes_iter(
723		&self,
724		hash: Block::Hash,
725	) -> ClientResult<Option<impl Iterator<Item = DbHash>>> {
726		let Some(body) = read_db(
727			&*self.db,
728			columns::KEY_LOOKUP,
729			columns::BODY_INDEX,
730			BlockId::<Block>::Hash(hash),
731		)?
732		else {
733			return Ok(None);
734		};
735		match Vec::<DbExtrinsic<Block>>::decode(&mut &body[..]) {
736			Ok(index) => Ok(Some(index.into_iter().flat_map(|ex| match ex {
737				DbExtrinsic::Indexed { hash, .. } => vec![hash],
738				DbExtrinsic::MultiRenew { hashes, .. } => hashes.into_iter().collect(),
739				_ => vec![],
740			}))),
741			Err(err) => {
742				Err(sp_blockchain::Error::Backend(format!("Error decoding body list: {err}")))
743			},
744		}
745	}
746}
747
748impl<Block: BlockT> sc_client_api::blockchain::HeaderBackend<Block> for BlockchainDb<Block> {
749	fn header(&self, hash: Block::Hash) -> ClientResult<Option<Block::Header>> {
750		let mut cache = self.header_cache.lock();
751		if let Some(result) = cache.get_refresh(&hash) {
752			return Ok(result.clone());
753		}
754		let header = utils::read_header(
755			&*self.db,
756			columns::KEY_LOOKUP,
757			columns::HEADER,
758			BlockId::<Block>::Hash(hash),
759		)?;
760		cache_header(&mut cache, hash, header.clone());
761		Ok(header)
762	}
763
	fn info(&self) -> sc_client_api::blockchain::Info<Block> {
		// Snapshot of the cached metadata; only `number_leaves` needs a second
		// lock (on the leaf set).
		let meta = self.meta.read();
		sc_client_api::blockchain::Info {
			best_hash: meta.best_hash,
			best_number: meta.best_number,
			genesis_hash: meta.genesis_hash,
			finalized_hash: meta.finalized_hash,
			finalized_number: meta.finalized_number,
			finalized_state: meta.finalized_state,
			number_leaves: self.leaves.read().count(),
			block_gap: meta.block_gap,
		}
	}
777
778	fn status(&self, hash: Block::Hash) -> ClientResult<sc_client_api::blockchain::BlockStatus> {
779		match self.header(hash)?.is_some() {
780			true => Ok(sc_client_api::blockchain::BlockStatus::InChain),
781			false => Ok(sc_client_api::blockchain::BlockStatus::Unknown),
782		}
783	}
784
	fn number(&self, hash: Block::Hash) -> ClientResult<Option<NumberFor<Block>>> {
		// Any metadata lookup failure is reported as "unknown block" (`Ok(None)`)
		// rather than propagated as an error.
		Ok(self.header_metadata(hash).ok().map(|header_metadata| header_metadata.number))
	}
788
789	fn hash(&self, number: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
790		Ok(utils::read_header::<Block>(
791			&*self.db,
792			columns::KEY_LOOKUP,
793			columns::HEADER,
794			BlockId::Number(number),
795		)?
796		.map(|header| header.hash()))
797	}
798}
799
impl<Block: BlockT> sc_client_api::blockchain::Backend<Block> for BlockchainDb<Block> {
	fn body(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
		// Serve from the pinned-blocks cache first; fall back to the database.
		let cache = self.pinned_blocks_cache.read();
		if let Some(result) = cache.body(&hash) {
			return Ok(result.clone());
		}

		self.body_uncached(hash)
	}

	fn justifications(&self, hash: Block::Hash) -> ClientResult<Option<Justifications>> {
		// Serve from the pinned-blocks cache first; fall back to the database.
		let cache = self.pinned_blocks_cache.read();
		if let Some(result) = cache.justifications(&hash) {
			return Ok(result.clone());
		}

		self.justifications_uncached(hash)
	}

	fn last_finalized(&self) -> ClientResult<Block::Hash> {
		Ok(self.meta.read().finalized_hash)
	}

	fn leaves(&self) -> ClientResult<Vec<Block::Hash>> {
		Ok(self.leaves.read().hashes())
	}

	fn children(&self, parent_hash: Block::Hash) -> ClientResult<Vec<Block::Hash>> {
		children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash)
	}

	fn indexed_transaction(&self, hash: DbHash) -> ClientResult<Option<Vec<u8>>> {
		Ok(self.db.get(columns::TRANSACTION, hash.as_ref()))
	}

	fn has_indexed_transaction(&self, hash: DbHash) -> ClientResult<bool> {
		Ok(self.db.contains(columns::TRANSACTION, hash.as_ref()))
	}

	fn block_indexed_hashes(&self, hash: Block::Hash) -> ClientResult<Option<Vec<DbHash>>> {
		self.block_indexed_hashes_iter(hash).map(|hashes| hashes.map(Iterator::collect))
	}

	fn block_indexed_body(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Vec<u8>>>> {
		// Resolve every indexed hash to its stored payload; a missing entry is
		// an error because the block's index claims it exists.
		match self.block_indexed_hashes_iter(hash) {
			Ok(Some(hashes)) => Ok(Some(
				hashes
					.map(|hash| match self.db.get(columns::TRANSACTION, hash.as_ref()) {
						Some(t) => Ok(t),
						None => Err(sp_blockchain::Error::Backend(format!(
							"Missing indexed transaction {hash:?}",
						))),
					})
					.collect::<Result<_, _>>()?,
			)),
			Ok(None) => Ok(None),
			Err(err) => Err(err),
		}
	}
}
860
impl<Block: BlockT> HeaderMetadata<Block> for BlockchainDb<Block> {
	type Error = sp_blockchain::Error;

	fn header_metadata(
		&self,
		hash: Block::Hash,
	) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
		// Fast path: cached metadata. Slow path: read the header from the
		// database, derive its metadata and populate the cache.
		self.header_metadata_cache.header_metadata(hash).map_or_else(
			|| {
				self.header(hash)?
					.map(|header| {
						let header_metadata = CachedHeaderMetadata::from(&header);
						self.header_metadata_cache
							.insert_header_metadata(header_metadata.hash, header_metadata.clone());
						header_metadata
					})
					.ok_or_else(|| {
						ClientError::UnknownBlock(format!(
							"Header was not found in the database: {hash:?}",
						))
					})
			},
			Ok,
		)
	}

	fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
		self.header_metadata_cache.insert_header_metadata(hash, metadata)
	}

	fn remove_header_metadata(&self, hash: Block::Hash) {
		// Drop the header from both caches so stale data cannot be served.
		self.header_cache.lock().remove(&hash);
		self.header_metadata_cache.remove_header_metadata(hash);
	}
}
896
/// Database transaction
pub struct BlockImportOperation<Block: BlockT> {
	// State at the parent of the block being imported.
	old_state: RecordStatsState<RefTrackingState<Block>, Block>,
	// Trie-node changes to apply to the state column.
	db_updates: PrefixedMemoryDB<HashingFor<Block>>,
	// Plain top-level key/value changes.
	storage_updates: StorageCollection,
	// Child-trie key/value changes.
	child_storage_updates: ChildStorageCollection,
	// Offchain (indexing) storage changes; drained by `apply_offchain`.
	offchain_storage_updates: OffchainChangesCollection,
	// The block staged by `set_block_data`, if any.
	pending_block: Option<PendingBlock<Block>>,
	// Auxiliary key/value ops (`None` value = delete); drained by `apply_aux`.
	aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
	// Blocks to mark finalized, each with an optional justification.
	finalized_blocks: Vec<(Block::Hash, Option<Justification>)>,
	// NOTE(review): presumably an explicit new best block — confirm in commit logic.
	set_head: Option<Block::Hash>,
	// Whether the state produced by this operation should be committed.
	commit_state: bool,
	// NOTE(review): presumably controls block-gap creation on import — confirm in commit logic.
	create_gap: bool,
	// Set when the whole state was replaced via `reset_storage`.
	reset_storage: bool,
	// Transaction-index operations for this block.
	index_ops: Vec<IndexOperation>,
}
913
914impl<Block: BlockT> BlockImportOperation<Block> {
915	fn apply_offchain(&mut self, transaction: &mut Transaction<DbHash>) {
916		let mut count = 0;
917		for ((prefix, key), value_operation) in self.offchain_storage_updates.drain(..) {
918			count += 1;
919			let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key);
920			match value_operation {
921				OffchainOverlayedChange::SetValue(val) => {
922					transaction.set_from_vec(columns::OFFCHAIN, &key, val)
923				},
924				OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key),
925			}
926		}
927
928		if count > 0 {
929			log::debug!(target: "sc_offchain", "Applied {count} offchain indexing changes.");
930		}
931	}
932
933	fn apply_aux(&mut self, transaction: &mut Transaction<DbHash>) {
934		for (key, maybe_val) in self.aux_ops.drain(..) {
935			match maybe_val {
936				Some(val) => transaction.set_from_vec(columns::AUX, &key, val),
937				None => transaction.remove(columns::AUX, &key),
938			}
939		}
940	}
941
942	fn apply_new_state(
943		&mut self,
944		storage: Storage,
945		state_version: StateVersion,
946	) -> ClientResult<Block::Hash> {
947		if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(k)) {
948			return Err(sp_blockchain::Error::InvalidState);
949		}
950
951		let child_delta = storage.children_default.values().map(|child_content| {
952			(
953				&child_content.child_info,
954				child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
955			)
956		});
957
958		let (root, transaction) = self.old_state.full_storage_root(
959			storage.top.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
960			child_delta,
961			state_version,
962		);
963
964		self.db_updates = transaction;
965		Ok(root)
966	}
967}
968
969impl<Block: BlockT> sc_client_api::backend::BlockImportOperation<Block>
970	for BlockImportOperation<Block>
971{
972	type State = RecordStatsState<RefTrackingState<Block>, Block>;
973
974	fn state(&self) -> ClientResult<Option<&Self::State>> {
975		Ok(Some(&self.old_state))
976	}
977
978	fn set_block_data(
979		&mut self,
980		header: Block::Header,
981		body: Option<Vec<Block::Extrinsic>>,
982		indexed_body: Option<Vec<Vec<u8>>>,
983		justifications: Option<Justifications>,
984		leaf_state: NewBlockState,
985		register_as_leaf: bool,
986	) -> ClientResult<()> {
987		assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
988		self.pending_block = Some(PendingBlock {
989			header,
990			body,
991			indexed_body,
992			justifications,
993			leaf_state,
994			register_as_leaf,
995		});
996		Ok(())
997	}
998
999	fn update_db_storage(
1000		&mut self,
1001		update: PrefixedMemoryDB<HashingFor<Block>>,
1002	) -> ClientResult<()> {
1003		self.db_updates = update;
1004		Ok(())
1005	}
1006
1007	fn reset_storage(
1008		&mut self,
1009		storage: Storage,
1010		state_version: StateVersion,
1011	) -> ClientResult<Block::Hash> {
1012		let root = self.apply_new_state(storage, state_version)?;
1013		self.commit_state = true;
1014		self.reset_storage = true;
1015		Ok(root)
1016	}
1017
1018	fn set_genesis_state(
1019		&mut self,
1020		storage: Storage,
1021		commit: bool,
1022		state_version: StateVersion,
1023	) -> ClientResult<Block::Hash> {
1024		let root = self.apply_new_state(storage, state_version)?;
1025		self.commit_state = commit;
1026		Ok(root)
1027	}
1028
1029	fn insert_aux<I>(&mut self, ops: I) -> ClientResult<()>
1030	where
1031		I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
1032	{
1033		self.aux_ops.append(&mut ops.into_iter().collect());
1034		Ok(())
1035	}
1036
1037	fn update_storage(
1038		&mut self,
1039		update: StorageCollection,
1040		child_update: ChildStorageCollection,
1041	) -> ClientResult<()> {
1042		self.storage_updates = update;
1043		self.child_storage_updates = child_update;
1044		Ok(())
1045	}
1046
1047	fn update_offchain_storage(
1048		&mut self,
1049		offchain_update: OffchainChangesCollection,
1050	) -> ClientResult<()> {
1051		self.offchain_storage_updates = offchain_update;
1052		Ok(())
1053	}
1054
1055	fn mark_finalized(
1056		&mut self,
1057		block: Block::Hash,
1058		justification: Option<Justification>,
1059	) -> ClientResult<()> {
1060		self.finalized_blocks.push((block, justification));
1061		Ok(())
1062	}
1063
1064	fn mark_head(&mut self, hash: Block::Hash) -> ClientResult<()> {
1065		assert!(self.set_head.is_none(), "Only one set head per operation is allowed");
1066		self.set_head = Some(hash);
1067		Ok(())
1068	}
1069
1070	fn update_transaction_index(&mut self, index_ops: Vec<IndexOperation>) -> ClientResult<()> {
1071		self.index_ops = index_ops;
1072		Ok(())
1073	}
1074
1075	fn set_create_gap(&mut self, create_gap: bool) {
1076		self.create_gap = create_gap;
1077	}
1078}
1079
/// Handle to the key-value database together with the pruning-aware state
/// database layered on top of it.
struct StorageDb<Block: BlockT> {
	pub db: Arc<dyn Database<DbHash>>,
	pub state_db: StateDb<Block::Hash, Vec<u8>, StateMetaDb>,
	// When the backing database cannot reference-count values, trie node keys
	// are prefixed to make them unique (see the `sp_state_machine::Storage`
	// impl below).
	prefix_keys: bool,
}
1085
1086impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for StorageDb<Block> {
1087	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
1088		if self.prefix_keys {
1089			let key = prefixed_key::<HashingFor<Block>>(key, prefix);
1090			self.state_db.get(&key, self)
1091		} else {
1092			self.state_db.get(key.as_ref(), self)
1093		}
1094		.map_err(|e| format!("Database backend error: {e:?}"))
1095	}
1096}
1097
// Raw node access used by the state database to read trie nodes from the
// STATE column; a missing key is reported as `Ok(None)`, never as an error.
impl<Block: BlockT> sc_state_db::NodeDb for StorageDb<Block> {
	type Error = io::Error;
	type Key = [u8];

	fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		Ok(self.db.get(columns::STATE, key))
	}
}
1106
/// In-memory genesis state: the precomputed state root together with the trie
/// nodes backing it. Used when the genesis state is not committed to disk.
struct DbGenesisStorage<Block: BlockT> {
	root: Block::Hash,
	storage: PrefixedMemoryDB<HashingFor<Block>>,
}
1111
1112impl<Block: BlockT> DbGenesisStorage<Block> {
1113	pub fn new(root: Block::Hash, storage: PrefixedMemoryDB<HashingFor<Block>>) -> Self {
1114		DbGenesisStorage { root, storage }
1115	}
1116}
1117
1118impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for DbGenesisStorage<Block> {
1119	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
1120		use hash_db::HashDB;
1121		Ok(self.storage.get(key, prefix))
1122	}
1123}
1124
/// Storage backing an empty trie; the tuple field holds the empty trie root.
struct EmptyStorage<Block: BlockT>(pub Block::Hash);
1126
1127impl<Block: BlockT> EmptyStorage<Block> {
1128	pub fn new() -> Self {
1129		let mut root = Block::Hash::default();
1130		let mut mdb = MemoryDB::<HashingFor<Block>>::default();
1131		// both triedbmut are the same on empty storage.
1132		sp_trie::trie_types::TrieDBMutBuilderV1::<HashingFor<Block>>::new(&mut mdb, &mut root)
1133			.build();
1134		EmptyStorage(root)
1135	}
1136}
1137
// An empty trie contains no nodes, so every lookup yields `None`.
impl<Block: BlockT> sp_state_machine::Storage<HashingFor<Block>> for EmptyStorage<Block> {
	fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result<Option<DBValue>, String> {
		Ok(None)
	}
}
1143
/// Frozen `value` at time `at`.
///
/// Used as inner structure under lock in `FrozenForDuration`.
struct Frozen<T: Clone> {
	// Instant at which `value` was last refreshed.
	at: std::time::Instant,
	// `None` until the first value is computed.
	value: Option<T>,
}
1151
/// Some value frozen for period of time.
///
/// If time `duration` not passed since the value was instantiated,
/// current frozen value is returned. Otherwise, you have to provide
/// a new value which will be again frozen for `duration`.
pub(crate) struct FrozenForDuration<T: Clone> {
	// How long a computed value stays fresh.
	duration: std::time::Duration,
	// Cached value plus its refresh timestamp, guarded for shared access.
	value: parking_lot::Mutex<Frozen<T>>,
}
1161
1162impl<T: Clone> FrozenForDuration<T> {
1163	fn new(duration: std::time::Duration) -> Self {
1164		Self { duration, value: Frozen { at: std::time::Instant::now(), value: None }.into() }
1165	}
1166
1167	fn take_or_else<F>(&self, f: F) -> T
1168	where
1169		F: FnOnce() -> T,
1170	{
1171		let mut lock = self.value.lock();
1172		let now = std::time::Instant::now();
1173		match lock.value.as_ref() {
1174			Some(value) if now.saturating_duration_since(lock.at) <= self.duration => value.clone(),
1175			_ => {
1176				let new_value = f();
1177				lock.at = now;
1178				lock.value = Some(new_value.clone());
1179				new_value
1180			},
1181		}
1182	}
1183}
1184
/// Disk backend.
///
/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all
/// blocks. Otherwise, trie nodes are kept only from some recent blocks.
pub struct Backend<Block: BlockT> {
	// Shared handle over the raw database and the pruning-aware state db.
	storage: Arc<StorageDb<Block>>,
	// Offchain worker local storage, backed by the same database.
	offchain_storage: offchain::LocalStorage,
	// Header/body/justification storage and cached chain metadata.
	blockchain: BlockchainDb<Block>,
	// How far behind the best block a block must be before it is
	// force-canonicalized (see `force_delayed_canonicalize`).
	canonicalization_delay: u64,
	// NOTE(review): presumably serializes block import operations; its use is
	// not visible in this part of the file — confirm against the trait impls.
	import_lock: Arc<RwLock<()>>,
	// `true` when the state pruning mode is an archive mode.
	is_archive: bool,
	// Block body pruning configuration.
	blocks_pruning: BlocksPruning,
	// I/O statistics, refreshed at most once per freeze window.
	io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>,
	// Accumulated state read/write statistics.
	state_usage: Arc<StateUsageStats>,
	// Genesis state kept in memory when it was not committed to the database,
	// so consensus can still query initial authorities etc.
	genesis_state: RwLock<Option<Arc<DbGenesisStorage<Block>>>>,
	// Optional shared trie node/value cache.
	shared_trie_cache: Option<sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
	// Filters taken from `DatabaseSettings`; assumed to be consulted when
	// pruning — usage is not visible in this part of the file.
	pruning_filters: Vec<Arc<dyn PruningFilter>>,
}
1203
1204impl<Block: BlockT> Backend<Block> {
1205	/// Create a new instance of database backend.
1206	///
1207	/// The pruning window is how old a block must be before the state is pruned.
1208	pub fn new(db_config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult<Self> {
1209		use utils::OpenDbError;
1210
1211		let db_source = &db_config.source;
1212
1213		let (needs_init, db) =
1214			match crate::utils::open_database::<Block>(db_source, DatabaseType::Full, false) {
1215				Ok(db) => (false, db),
1216				Err(OpenDbError::DoesNotExist) => {
1217					let db =
1218						crate::utils::open_database::<Block>(db_source, DatabaseType::Full, true)?;
1219					(true, db)
1220				},
1221				Err(as_is) => return Err(as_is.into()),
1222			};
1223
1224		Self::from_database(db as Arc<_>, canonicalization_delay, &db_config, needs_init)
1225	}
1226
1227	/// Reset the shared trie cache.
1228	pub fn reset_trie_cache(&self) {
1229		if let Some(cache) = &self.shared_trie_cache {
1230			cache.reset();
1231		}
1232	}
1233
	/// Create new memory-backed client backend for tests.
	///
	/// Uses `BlocksPruning::Some(blocks_pruning)` and no pruning filters.
	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self {
		Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay)
	}
1239
	/// Create new memory-backed client backend for tests with custom pruning filters.
	///
	/// Uses `BlocksPruning::Some(blocks_pruning)` for body pruning.
	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test_with_pruning_filters(
		blocks_pruning: u32,
		canonicalization_delay: u64,
		pruning_filters: Vec<Arc<dyn PruningFilter>>,
	) -> Self {
		Self::new_test_with_tx_storage_and_filters(
			BlocksPruning::Some(blocks_pruning),
			canonicalization_delay,
			pruning_filters,
		)
	}
1253
	/// Create new memory-backed client backend for tests.
	///
	/// Accepts the full `BlocksPruning` setting; no pruning filters are installed.
	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test_with_tx_storage(
		blocks_pruning: BlocksPruning,
		canonicalization_delay: u64,
	) -> Self {
		Self::new_test_with_tx_storage_and_filters(
			blocks_pruning,
			canonicalization_delay,
			Default::default(),
		)
	}
1266
1267	/// Create new memory-backed client backend for tests with custom pruning filters.
1268	#[cfg(any(test, feature = "test-helpers"))]
1269	pub fn new_test_with_tx_storage_and_filters(
1270		blocks_pruning: BlocksPruning,
1271		canonicalization_delay: u64,
1272		pruning_filters: Vec<Arc<dyn PruningFilter>>,
1273	) -> Self {
1274		let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS);
1275		let db = sp_database::as_database(db);
1276		let state_pruning = match blocks_pruning {
1277			BlocksPruning::KeepAll => PruningMode::ArchiveAll,
1278			BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical,
1279			BlocksPruning::Some(n) => PruningMode::blocks_pruning(n),
1280		};
1281		let db_setting = DatabaseSettings {
1282			trie_cache_maximum_size: Some(16 * 1024 * 1024),
1283			state_pruning: Some(state_pruning),
1284			source: DatabaseSource::Custom { db, require_create_flag: true },
1285			blocks_pruning,
1286			pruning_filters,
1287			metrics_registry: None,
1288		};
1289
1290		Self::new(db_setting, canonicalization_delay).expect("failed to create test-db")
1291	}
1292
	/// Expose the Database that is used by this backend.
	/// The second argument is the Column that stores the State.
	///
	/// Should only be needed for benchmarking.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_db(&self) -> (Arc<dyn sp_database::Database<DbHash>>, sp_database::ColumnId) {
		// Cloning the `Arc` only bumps the reference count.
		(self.storage.db.clone(), columns::STATE)
	}
1301
	/// Expose the Storage that is used by this backend.
	///
	/// Should only be needed for benchmarking.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_storage(&self) -> Arc<dyn sp_state_machine::Storage<HashingFor<Block>>> {
		// `StorageDb` implements `sp_state_machine::Storage`; hand out a shared handle.
		self.storage.clone()
	}
1309
	/// Expose the shared trie cache that is used by this backend.
	///
	/// Returns `None` when the backend was configured without a trie cache.
	///
	/// Should only be needed for benchmarking.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_shared_trie_cache(
		&self,
	) -> Option<sp_trie::cache::SharedTrieCache<HashingFor<Block>>> {
		self.shared_trie_cache.clone()
	}
1319
	/// Build the backend from an already-opened database.
	///
	/// Opens the state database with the requested pruning mode, wires up the
	/// blockchain/offchain storages and the optional shared trie cache, then
	/// performs one-off maintenance (migrating the finalized-state meta key and
	/// dropping unfillable block gaps) in a single init transaction.
	fn from_database(
		db: Arc<dyn Database<DbHash>>,
		canonicalization_delay: u64,
		config: &DatabaseSettings,
		should_init: bool,
	) -> ClientResult<Self> {
		// All initialization writes are collected here and committed at the end.
		let mut db_init_transaction = Transaction::new();

		let requested_state_pruning = config.state_pruning.clone();
		let state_meta_db = StateMetaDb(db.clone());
		let map_e = sp_blockchain::Error::from_state_db;

		let (state_db_init_commit_set, state_db) = StateDb::open(
			state_meta_db,
			requested_state_pruning,
			!db.supports_ref_counting(),
			should_init,
		)
		.map_err(map_e)?;

		// Changes the state db wants written as part of its own initialization.
		apply_state_commit(&mut db_init_transaction, state_db_init_commit_set);

		let state_pruning_used = state_db.pruning_mode();
		let is_archive_pruning = state_pruning_used.is_archive();
		let blockchain = BlockchainDb::new(db.clone())?;

		// Without reference counting, trie node keys must be prefixed to stay unique.
		let storage_db =
			StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() };

		let offchain_storage = offchain::LocalStorage::new(db.clone());

		let shared_trie_cache = config.trie_cache_maximum_size.map(|maximum_size| {
			let system_memory = sysinfo::System::new_all();
			let used_memory = system_memory.used_memory();
			let total_memory = system_memory.total_memory();

			// Only warn — the cache is still created even when memory looks tight.
			debug!("Initializing shared trie cache with size {} bytes, {}% of total memory", maximum_size, (maximum_size as f64 / total_memory as f64 * 100.0));
			if maximum_size as u64 > total_memory - used_memory {
				warn!(
					"Not enough memory to initialize shared trie cache. Cache size: {} bytes. System memory: used {} bytes, total {} bytes",
					maximum_size, used_memory, total_memory,
				);
			}

			SharedTrieCache::new(sp_trie::cache::CacheSize::new(maximum_size), config.metrics_registry.as_ref())
		});

		let backend = Backend {
			storage: Arc::new(storage_db),
			offchain_storage,
			blockchain,
			canonicalization_delay,
			import_lock: Default::default(),
			is_archive: is_archive_pruning,
			io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)),
			state_usage: Arc::new(StateUsageStats::new()),
			blocks_pruning: config.blocks_pruning,
			genesis_state: RwLock::new(None),
			shared_trie_cache,
			pruning_filters: config.pruning_filters.clone(),
		};

		// Older DB versions have no last state key. Check if the state is available and set it.
		let info = backend.blockchain.info();
		if info.finalized_state.is_none() &&
			info.finalized_hash != Default::default() &&
			sc_client_api::Backend::have_state_at(
				&backend,
				info.finalized_hash,
				info.finalized_number,
			) {
			backend.blockchain.update_meta(MetaUpdate {
				hash: info.finalized_hash,
				number: info.finalized_number,
				is_best: info.finalized_hash == info.best_hash,
				is_finalized: true,
				with_state: true,
			});
		}

		// Non archive nodes cannot fill the missing block gap with bodies.
		// If the gap is present, it means that every restart will try to fill the gap:
		// - a block request is made for each and every block in the gap
		// - the request is fulfilled putting pressure on the network and other nodes
		// - upon receiving the block, the block cannot be executed since the state
		//  of the parent block might have been discarded
		// - then the sync engine closes the gap in memory, but never in DB.
		//
		// This leads to inefficient syncing and high CPU usage on every restart. To mitigate this,
		// remove the gap from the DB if we detect it and the current node is not an archive.
		match (backend.is_archive, info.block_gap) {
			(false, Some(gap)) if matches!(gap.gap_type, BlockGapType::MissingBody) => {
				warn!(
					"Detected a missing body gap for non-archive nodes. Removing the gap={:?}",
					gap
				);

				// Drop the gap both on disk and from the in-memory metadata.
				db_init_transaction.remove(columns::META, meta_keys::BLOCK_GAP);
				db_init_transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
				backend.blockchain.update_block_gap(None);
			},
			_ => {},
		}

		db.commit(db_init_transaction)?;

		Ok(backend)
	}
1428
	/// Handle setting head within a transaction. `route_to` should be the last
	/// block that existed in the database. `best_to` should be the best block
	/// to be set.
	///
	/// In the case where the new best block is a block to be imported, `route_to`
	/// should be the parent of `best_to`. In the case where we set an existing block
	/// to be best, `route_to` should equal to `best_to`.
	///
	/// Returns the hashes of the `(enacted, retracted)` blocks of the reorg, if any.
	fn set_head_with_transaction(
		&self,
		transaction: &mut Transaction<DbHash>,
		route_to: Block::Hash,
		best_to: (NumberFor<Block>, Block::Hash),
	) -> ClientResult<(Vec<Block::Hash>, Vec<Block::Hash>)> {
		let mut enacted = Vec::default();
		let mut retracted = Vec::default();

		let (best_number, best_hash) = best_to;

		let meta = self.blockchain.meta.read();

		// Refuse to move the head back beyond the canonicalization delay: the
		// state needed for such a reorg may already have been discarded.
		if meta.best_number.saturating_sub(best_number).saturated_into::<u64>() >
			self.canonicalization_delay
		{
			return Err(sp_blockchain::Error::SetHeadTooOld);
		}

		let parent_exists =
			self.blockchain.status(route_to)? == sp_blockchain::BlockStatus::InChain;

		// Cannot find tree route with empty DB or when imported a detached block.
		if meta.best_hash != Default::default() && parent_exists {
			let tree_route = sp_blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?;

			// uncanonicalize: check safety violations and ensure the numbers no longer
			// point to these block hashes in the key mapping.
			for r in tree_route.retracted() {
				if r.hash == meta.finalized_hash {
					warn!(
						"Potential safety failure: reverting finalized block {:?}",
						(&r.number, &r.hash)
					);

					return Err(sp_blockchain::Error::NotInFinalizedChain);
				}

				retracted.push(r.hash);
				utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number)?;
			}

			// canonicalize: set the number lookup to map to this block's hash.
			for e in tree_route.enacted() {
				enacted.push(e.hash);
				utils::insert_number_to_key_mapping(
					transaction,
					columns::KEY_LOOKUP,
					e.number,
					e.hash,
				)?;
			}
		}

		// Point the best-block meta key and the number lookup at the new head.
		let lookup_key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?;
		transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key);
		utils::insert_number_to_key_mapping(
			transaction,
			columns::KEY_LOOKUP,
			best_number,
			best_hash,
		)?;

		Ok((enacted, retracted))
	}
1501
1502	fn ensure_sequential_finalization(
1503		&self,
1504		header: &Block::Header,
1505		last_finalized: Option<Block::Hash>,
1506	) -> ClientResult<()> {
1507		let last_finalized =
1508			last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash);
1509		if last_finalized != self.blockchain.meta.read().genesis_hash &&
1510			*header.parent_hash() != last_finalized
1511		{
1512			return Err(sp_blockchain::Error::NonSequentialFinalization(format!(
1513				"Last finalized {last_finalized:?} not parent of {:?}",
1514				header.hash()
1515			)));
1516		}
1517		Ok(())
1518	}
1519
	/// Finalize block `hash` inside `transaction`, returning the metadata
	/// update to apply once the transaction is committed.
	///
	/// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls
	/// for performance reasons.
	fn finalize_block_with_transaction(
		&self,
		transaction: &mut Transaction<DbHash>,
		hash: Block::Hash,
		header: &Block::Header,
		last_finalized: Option<Block::Hash>,
		justification: Option<Justification>,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
		remove_displaced: bool,
	) -> ClientResult<MetaUpdate<Block>> {
		// TODO: ensure best chain contains this block.
		let number = *header.number();
		// Finalization must proceed parent-by-parent.
		self.ensure_sequential_finalization(header, last_finalized)?;
		let with_state = sc_client_api::Backend::have_state_at(self, hash, number);

		self.note_finalized(
			transaction,
			header,
			hash,
			with_state,
			current_transaction_justifications,
			remove_displaced,
		)?;

		if let Some(justification) = justification {
			// Persist the justification and remember it so later calls within
			// the same (not yet committed) transaction can see it.
			transaction.set_from_vec(
				columns::JUSTIFICATIONS,
				&utils::number_and_hash_to_lookup_key(number, hash)?,
				Justifications::from(justification.clone()).encode(),
			);
			current_transaction_justifications.insert(hash, justification);
		}
		Ok(MetaUpdate { hash, number, is_best: false, is_finalized: true, with_state })
	}
1556
1557	// performs forced canonicalization with a delay after importing a non-finalized block.
1558	fn force_delayed_canonicalize(
1559		&self,
1560		transaction: &mut Transaction<DbHash>,
1561	) -> ClientResult<()> {
1562		let best_canonical = match self.storage.state_db.last_canonicalized() {
1563			LastCanonicalized::None => 0,
1564			LastCanonicalized::Block(b) => b,
1565			// Nothing needs to be done when canonicalization is not happening.
1566			LastCanonicalized::NotCanonicalizing => return Ok(()),
1567		};
1568
1569		let info = self.blockchain.info();
1570		let best_number: u64 = self.blockchain.info().best_number.saturated_into();
1571
1572		for to_canonicalize in
1573			best_canonical + 1..=best_number.saturating_sub(self.canonicalization_delay)
1574		{
1575			let hash_to_canonicalize = sc_client_api::blockchain::HeaderBackend::hash(
1576				&self.blockchain,
1577				to_canonicalize.saturated_into(),
1578			)?
1579			.ok_or_else(|| {
1580				let best_hash = info.best_hash;
1581
1582				sp_blockchain::Error::Backend(format!(
1583					"Can't canonicalize missing block number #{to_canonicalize} when for best block {best_hash:?} (#{best_number})",
1584				))
1585			})?;
1586
1587			if !sc_client_api::Backend::have_state_at(
1588				self,
1589				hash_to_canonicalize,
1590				to_canonicalize.saturated_into(),
1591			) {
1592				return Ok(());
1593			}
1594
1595			trace!(target: "db", "Canonicalize block #{to_canonicalize} ({hash_to_canonicalize:?})");
1596			let commit = self.storage.state_db.canonicalize_block(&hash_to_canonicalize).map_err(
1597				sp_blockchain::Error::from_state_db::<
1598					sc_state_db::Error<sp_database::error::DatabaseError>,
1599				>,
1600			)?;
1601			apply_state_commit(transaction, commit);
1602		}
1603
1604		Ok(())
1605	}
1606
1607	fn try_commit_operation(&self, mut operation: BlockImportOperation<Block>) -> ClientResult<()> {
1608		let mut transaction = Transaction::new();
1609
1610		operation.apply_aux(&mut transaction);
1611		operation.apply_offchain(&mut transaction);
1612
1613		let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len());
1614		let (best_num, mut last_finalized_hash, mut last_finalized_num, mut block_gap) = {
1615			let meta = self.blockchain.meta.read();
1616			(meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap)
1617		};
1618
1619		let mut block_gap_updated = false;
1620
1621		let mut current_transaction_justifications: HashMap<Block::Hash, Justification> =
1622			HashMap::new();
1623		let mut finalized_blocks = operation.finalized_blocks.into_iter().peekable();
1624		while let Some((block_hash, justification)) = finalized_blocks.next() {
1625			let block_header = self.blockchain.expect_header(block_hash)?;
1626			meta_updates.push(self.finalize_block_with_transaction(
1627				&mut transaction,
1628				block_hash,
1629				&block_header,
1630				Some(last_finalized_hash),
1631				justification,
1632				&mut current_transaction_justifications,
1633				finalized_blocks.peek().is_none(),
1634			)?);
1635			last_finalized_hash = block_hash;
1636			last_finalized_num = *block_header.number();
1637		}
1638
1639		let imported = if let Some(pending_block) = operation.pending_block {
1640			let hash = pending_block.header.hash();
1641
1642			let parent_hash = *pending_block.header.parent_hash();
1643			let number = *pending_block.header.number();
1644			let highest_leaf = self
1645				.blockchain
1646				.leaves
1647				.read()
1648				.highest_leaf()
1649				.map(|(n, _)| n)
1650				.unwrap_or(Zero::zero());
1651			let header_exists_in_db =
1652				number <= highest_leaf && self.blockchain.header(hash)?.is_some();
1653			// Body in DB (not incoming block) - needed to update gap when adding body to existing
1654			// header.
1655			let body_exists_in_db = self.blockchain.body(hash)?.is_some();
1656			// Incoming block has body - used for fast sync gap handling.
1657			let incoming_has_body = pending_block.body.is_some();
1658
1659			// blocks are keyed by number + hash.
1660			let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?;
1661
1662			if pending_block.leaf_state.is_best() {
1663				self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?;
1664			};
1665
1666			utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?;
1667
1668			transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode());
1669			if let Some(body) = pending_block.body {
1670				// If we have any index operations we save block in the new format with indexed
1671				// extrinsic headers Otherwise we save the body as a single blob.
1672				if operation.index_ops.is_empty() {
1673					transaction.set_from_vec(columns::BODY, &lookup_key, body.encode());
1674				} else {
1675					let body =
1676						apply_index_ops::<Block>(&mut transaction, body, operation.index_ops);
1677					transaction.set_from_vec(columns::BODY_INDEX, &lookup_key, body);
1678				}
1679			}
1680			if let Some(body) = pending_block.indexed_body {
1681				apply_indexed_body::<Block>(&mut transaction, body);
1682			}
1683			if let Some(justifications) = pending_block.justifications {
1684				transaction.set_from_vec(
1685					columns::JUSTIFICATIONS,
1686					&lookup_key,
1687					justifications.encode(),
1688				);
1689			}
1690
1691			if number.is_zero() {
1692				transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
1693
1694				if operation.commit_state {
1695					transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key);
1696				} else {
1697					// When we don't want to commit the genesis state, we still preserve it in
1698					// memory to bootstrap consensus. It is queried for an initial list of
1699					// authorities, etc.
1700					*self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new(
1701						*pending_block.header.state_root(),
1702						operation.db_updates.clone(),
1703					)));
1704				}
1705			}
1706
1707			let finalized = if operation.commit_state {
1708				let mut changeset: sc_state_db::ChangeSet<Vec<u8>> =
1709					sc_state_db::ChangeSet::default();
1710				let mut ops: u64 = 0;
1711				let mut bytes: u64 = 0;
1712				let mut removal: u64 = 0;
1713				let mut bytes_removal: u64 = 0;
1714				for (mut key, (val, rc)) in operation.db_updates.drain() {
1715					self.storage.db.sanitize_key(&mut key);
1716					if rc > 0 {
1717						ops += 1;
1718						bytes += key.len() as u64 + val.len() as u64;
1719						if rc == 1 {
1720							changeset.inserted.push((key, val.to_vec()));
1721						} else {
1722							changeset.inserted.push((key.clone(), val.to_vec()));
1723							for _ in 0..rc - 1 {
1724								changeset.inserted.push((key.clone(), Default::default()));
1725							}
1726						}
1727					} else if rc < 0 {
1728						removal += 1;
1729						bytes_removal += key.len() as u64;
1730						if rc == -1 {
1731							changeset.deleted.push(key);
1732						} else {
1733							for _ in 0..-rc {
1734								changeset.deleted.push(key.clone());
1735							}
1736						}
1737					}
1738				}
1739				self.state_usage.tally_writes_nodes(ops, bytes);
1740				self.state_usage.tally_removed_nodes(removal, bytes_removal);
1741
1742				let mut ops: u64 = 0;
1743				let mut bytes: u64 = 0;
1744				for (key, value) in operation
1745					.storage_updates
1746					.iter()
1747					.chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter()))
1748				{
1749					ops += 1;
1750					bytes += key.len() as u64;
1751					if let Some(v) = value.as_ref() {
1752						bytes += v.len() as u64;
1753					}
1754				}
1755				self.state_usage.tally_writes(ops, bytes);
1756				let number_u64 = number.saturated_into::<u64>();
1757				let commit = self
1758					.storage
1759					.state_db
1760					.insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset)
1761					.map_err(|e: sc_state_db::Error<sp_database::error::DatabaseError>| {
1762						sp_blockchain::Error::from_state_db(e)
1763					})?;
1764				apply_state_commit(&mut transaction, commit);
1765				if number <= last_finalized_num {
1766					// Canonicalize in the db when re-importing existing blocks with state.
1767					let commit = self.storage.state_db.canonicalize_block(&hash).map_err(
1768						sp_blockchain::Error::from_state_db::<
1769							sc_state_db::Error<sp_database::error::DatabaseError>,
1770						>,
1771					)?;
1772					apply_state_commit(&mut transaction, commit);
1773					meta_updates.push(MetaUpdate {
1774						hash,
1775						number,
1776						is_best: false,
1777						is_finalized: true,
1778						with_state: true,
1779					});
1780				}
1781
1782				// Check if need to finalize. Genesis is always finalized instantly.
1783				let finalized = number_u64 == 0 || pending_block.leaf_state.is_final();
1784				finalized
1785			} else {
1786				(number.is_zero() && last_finalized_num.is_zero()) ||
1787					pending_block.leaf_state.is_final()
1788			};
1789
1790			let header = &pending_block.header;
1791			let is_best = pending_block.leaf_state.is_best();
1792			trace!(
1793				target: "db",
1794				"DB Commit {hash:?} ({number}), best={is_best}, state={}, header_in_db={header_exists_in_db} body_in_db={body_exists_in_db} incoming_body={incoming_has_body}, finalized={finalized}",
1795				operation.commit_state,
1796			);
1797
1798			self.state_usage.merge_sm(operation.old_state.usage_info());
1799
1800			// release state reference so that it can be finalized
1801			// VERY IMPORTANT
1802			drop(operation.old_state);
1803
1804			if finalized {
1805				// TODO: ensure best chain contains this block.
1806				self.ensure_sequential_finalization(header, Some(last_finalized_hash))?;
1807				let mut current_transaction_justifications = HashMap::new();
1808				self.note_finalized(
1809					&mut transaction,
1810					header,
1811					hash,
1812					operation.commit_state,
1813					&mut current_transaction_justifications,
1814					true,
1815				)?;
1816			} else {
1817				// canonicalize blocks which are old enough, regardless of finality.
1818				self.force_delayed_canonicalize(&mut transaction)?
1819			}
1820
1821			if !header_exists_in_db {
1822				// Add a new leaf if the block has the potential to be finalized.
1823				if pending_block.register_as_leaf &&
1824					(number > last_finalized_num || last_finalized_num.is_zero())
1825				{
1826					let mut leaves = self.blockchain.leaves.write();
1827					leaves.import(hash, number, parent_hash);
1828					leaves.prepare_transaction(
1829						&mut transaction,
1830						columns::META,
1831						meta_keys::LEAF_PREFIX,
1832					);
1833				}
1834
1835				let mut children = children::read_children(
1836					&*self.storage.db,
1837					columns::META,
1838					meta_keys::CHILDREN_PREFIX,
1839					parent_hash,
1840				)?;
1841				if !children.contains(&hash) {
1842					children.push(hash);
1843					children::write_children(
1844						&mut transaction,
1845						columns::META,
1846						meta_keys::CHILDREN_PREFIX,
1847						parent_hash,
1848						children,
1849					);
1850				}
1851			}
1852
1853			let should_check_block_gap = !header_exists_in_db || !body_exists_in_db;
1854			debug!(
1855				target: "db",
1856				"should_check_block_gap = {should_check_block_gap}",
1857			);
1858
1859			if should_check_block_gap {
1860				let update_gap =
1861					|transaction: &mut Transaction<DbHash>,
1862					 new_gap: BlockGap<NumberFor<Block>>,
1863					 block_gap: &mut Option<BlockGap<NumberFor<Block>>>| {
1864						transaction.set(columns::META, meta_keys::BLOCK_GAP, &new_gap.encode());
1865						transaction.set(
1866							columns::META,
1867							meta_keys::BLOCK_GAP_VERSION,
1868							&BLOCK_GAP_CURRENT_VERSION.encode(),
1869						);
1870						block_gap.replace(new_gap);
1871						debug!(target: "db", "Update block gap. {block_gap:?}");
1872					};
1873
1874				let remove_gap =
1875					|transaction: &mut Transaction<DbHash>,
1876					 block_gap: &mut Option<BlockGap<NumberFor<Block>>>| {
1877						transaction.remove(columns::META, meta_keys::BLOCK_GAP);
1878						transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
1879						*block_gap = None;
1880						debug!(target: "db", "Removed block gap.");
1881					};
1882
1883				if let Some(mut gap) = block_gap {
1884					match gap.gap_type {
1885						BlockGapType::MissingHeaderAndBody => {
1886							// Handle blocks at gap start or immediately following (possibly
1887							// indicating blocks already imported during warp sync where
1888							// start was not updated).
1889							if number == gap.start {
1890								gap.start = number + One::one();
1891								utils::insert_number_to_key_mapping(
1892									&mut transaction,
1893									columns::KEY_LOOKUP,
1894									number,
1895									hash,
1896								)?;
1897								if gap.start > gap.end {
1898									remove_gap(&mut transaction, &mut block_gap);
1899								} else {
1900									update_gap(&mut transaction, gap, &mut block_gap);
1901								}
1902								block_gap_updated = true;
1903							}
1904						},
1905						BlockGapType::MissingBody => {
1906							// Gap increased when syncing the header chain during fast sync.
1907							if number == gap.end + One::one() && !incoming_has_body {
1908								gap.end += One::one();
1909								utils::insert_number_to_key_mapping(
1910									&mut transaction,
1911									columns::KEY_LOOKUP,
1912									number,
1913									hash,
1914								)?;
1915								update_gap(&mut transaction, gap, &mut block_gap);
1916								block_gap_updated = true;
1917							// Gap decreased when downloading the full blocks.
1918							} else if number == gap.start && incoming_has_body {
1919								gap.start += One::one();
1920								if gap.start > gap.end {
1921									remove_gap(&mut transaction, &mut block_gap);
1922								} else {
1923									update_gap(&mut transaction, gap, &mut block_gap);
1924								}
1925								block_gap_updated = true;
1926							}
1927						},
1928					}
1929				} else if operation.create_gap {
1930					if number > best_num + One::one() &&
1931						self.blockchain.header(parent_hash)?.is_none()
1932					{
1933						let gap = BlockGap {
1934							start: best_num + One::one(),
1935							end: number - One::one(),
1936							gap_type: BlockGapType::MissingHeaderAndBody,
1937						};
1938						update_gap(&mut transaction, gap, &mut block_gap);
1939						block_gap_updated = true;
1940						debug!(target: "db", "Detected block gap (warp sync) {block_gap:?}");
1941					} else if number == best_num + One::one() &&
1942						self.blockchain.header(parent_hash)?.is_some() &&
1943						!incoming_has_body
1944					{
1945						let gap = BlockGap {
1946							start: number,
1947							end: number,
1948							gap_type: BlockGapType::MissingBody,
1949						};
1950						update_gap(&mut transaction, gap, &mut block_gap);
1951						block_gap_updated = true;
1952						debug!(target: "db", "Detected block gap (fast sync) {block_gap:?}");
1953					}
1954				}
1955			}
1956
1957			meta_updates.push(MetaUpdate {
1958				hash,
1959				number,
1960				is_best: pending_block.leaf_state.is_best(),
1961				is_finalized: finalized,
1962				with_state: operation.commit_state,
1963			});
1964			Some((pending_block.header, hash))
1965		} else {
1966			None
1967		};
1968
1969		if let Some(set_head) = operation.set_head {
1970			if let Some(header) =
1971				sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)?
1972			{
1973				let number = header.number();
1974				let hash = header.hash();
1975
1976				self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?;
1977
1978				meta_updates.push(MetaUpdate {
1979					hash,
1980					number: *number,
1981					is_best: true,
1982					is_finalized: false,
1983					with_state: false,
1984				});
1985			} else {
1986				return Err(sp_blockchain::Error::UnknownBlock(format!(
1987					"Cannot set head {set_head:?}",
1988				)));
1989			}
1990		}
1991
1992		self.storage.db.commit(transaction)?;
1993
1994		// `reset_storage == true` means the entire state got replaced.
1995		// In this case we optimize the `STATE` column to improve read performance.
1996		if operation.reset_storage {
1997			if let Err(e) = self.storage.db.optimize_db_col(columns::STATE) {
1998				warn!(target: "db", "Failed to optimize database after state import: {e:?}");
1999			}
2000		}
2001
2002		// Apply all in-memory state changes.
2003		// Code beyond this point can't fail.
2004
2005		if let Some((header, hash)) = imported {
2006			trace!(target: "db", "DB Commit done {hash:?}");
2007			let header_metadata = CachedHeaderMetadata::from(&header);
2008			self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata);
2009			cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header));
2010		}
2011
2012		for m in meta_updates {
2013			self.blockchain.update_meta(m);
2014		}
2015		if block_gap_updated {
2016			self.blockchain.update_block_gap(block_gap);
2017		}
2018
2019		Ok(())
2020	}
2021
	/// Write stuff to a transaction after a new block is finalized. This canonicalizes finalized
	/// blocks. Fails if called with a block which was not a child of the last finalized block.
	/// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls
	/// for performance reasons.
	fn note_finalized(
		&self,
		transaction: &mut Transaction<DbHash>,
		f_header: &Block::Header,
		f_hash: Block::Hash,
		with_state: bool,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
		remove_displaced: bool,
	) -> ClientResult<()> {
		let f_num = *f_header.number();

		// Update the finalized-block (and optionally finalized-state) meta pointers.
		let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash)?;
		if with_state {
			transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key.clone());
		}
		transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key);

		// Only canonicalize if the state-db has not already canonicalized past this block.
		let requires_canonicalization = match self.storage.state_db.last_canonicalized() {
			LastCanonicalized::None => true,
			LastCanonicalized::Block(b) => f_num.saturated_into::<u64>() > b,
			LastCanonicalized::NotCanonicalizing => false,
		};

		if requires_canonicalization && sc_client_api::Backend::have_state_at(self, f_hash, f_num) {
			let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err(
				sp_blockchain::Error::from_state_db::<
					sc_state_db::Error<sp_database::error::DatabaseError>,
				>,
			)?;
			apply_state_commit(transaction, commit);
		}

		if remove_displaced {
			// Leaves on branches not containing the finalized block are now unreachable.
			let new_displaced = self.blockchain.displaced_leaves_after_finalizing(
				f_hash,
				f_num,
				*f_header.parent_hash(),
			)?;

			self.blockchain.leaves.write().remove_displaced_leaves(FinalizationOutcome::new(
				new_displaced.displaced_leaves.iter().copied(),
			));

			if !matches!(self.blocks_pruning, BlocksPruning::KeepAll) {
				self.prune_displaced_branches(transaction, &new_displaced)?;
			}
		}

		self.prune_blocks(transaction, f_num, current_transaction_justifications)?;

		Ok(())
	}
2078
	/// Apply the `BlocksPruning::Some(n)` retention policy after finalizing
	/// `finalized_number`.
	///
	/// At most one block is removed per call: the one falling out of the
	/// retention window (`finalized_number - max(n, 1)`). Before deletion, its
	/// body/justifications are preserved for pinned readers, and pruning
	/// filters may veto the removal entirely.
	fn prune_blocks(
		&self,
		transaction: &mut Transaction<DbHash>,
		finalized_number: NumberFor<Block>,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
	) -> ClientResult<()> {
		if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning {
			// Always keep the last finalized block
			let keep = std::cmp::max(blocks_pruning, 1);
			if finalized_number >= keep.into() {
				let number = finalized_number.saturating_sub(keep.into());

				// Before we prune a block, check if it is pinned
				if let Some(hash) = self.blockchain.hash(number)? {
					// Check if any pruning filter wants to preserve this block.
					// We need to check both the current transaction justifications (not yet in DB)
					// and the DB itself (for justifications from previous transactions).
					if !self.pruning_filters.is_empty() {
						let justifications = match current_transaction_justifications.get(&hash) {
							Some(j) => Some(Justifications::from(j.clone())),
							None => self.blockchain.justifications(hash)?,
						};

						let should_retain = justifications
							.map(|j| self.pruning_filters.iter().any(|f| f.should_retain(&j)))
							.unwrap_or(false);

						// We can just return here, pinning can be ignored since the block will
						// remain in the DB.
						if should_retain {
							debug!(
								target: "db",
								"Preserving block #{number} ({hash}) due to keep predicate match"
							);
							return Ok(());
						}
					}

					self.blockchain.insert_persisted_body_if_pinned(hash)?;

					// If the block was finalized in this transaction, it will not be in the db
					// yet.
					if let Some(justification) = current_transaction_justifications.remove(&hash) {
						self.blockchain.insert_justifications_if_pinned(hash, justification);
					} else {
						self.blockchain.insert_persisted_justifications_if_pinned(hash)?;
					}
				};

				self.prune_block(transaction, BlockId::<Block>::number(number))?;
			}
		}
		Ok(())
	}
2133
2134	fn prune_displaced_branches(
2135		&self,
2136		transaction: &mut Transaction<DbHash>,
2137		displaced: &DisplacedLeavesAfterFinalization<Block>,
2138	) -> ClientResult<()> {
2139		// Discard all blocks from displaced branches
2140		for &hash in displaced.displaced_blocks.iter() {
2141			self.blockchain.insert_persisted_body_if_pinned(hash)?;
2142			self.prune_block(transaction, BlockId::<Block>::hash(hash))?;
2143		}
2144		Ok(())
2145	}
2146
	/// Remove a block's body, justifications, and transaction-index entries from
	/// the database. The header itself is removed separately by callers.
	fn prune_block(
		&self,
		transaction: &mut Transaction<DbHash>,
		id: BlockId<Block>,
	) -> ClientResult<()> {
		debug!(target: "db", "Removing block #{id}");
		utils::remove_from_db(
			transaction,
			&*self.storage.db,
			columns::KEY_LOOKUP,
			columns::BODY,
			id,
		)?;
		utils::remove_from_db(
			transaction,
			&*self.storage.db,
			columns::KEY_LOOKUP,
			columns::JUSTIFICATIONS,
			id,
		)?;
		// If the block has an index of detached transactions, remove it and
		// release the reference held on every indexed transaction value.
		if let Some(index) =
			read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)?
		{
			utils::remove_from_db(
				transaction,
				&*self.storage.db,
				columns::KEY_LOOKUP,
				columns::BODY_INDEX,
				id,
			)?;
			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
				Ok(index) => {
					for ex in index {
						match ex {
							DbExtrinsic::Indexed { hash, .. } => {
								transaction.release(columns::TRANSACTION, hash);
							},
							DbExtrinsic::MultiRenew { hashes, .. } => {
								for hash in hashes {
									transaction.release(columns::TRANSACTION, hash);
								}
							},
							// Fully-stored extrinsics hold no separate indexed data.
							DbExtrinsic::Full(_) => {},
						}
					}
				},
				Err(err) => {
					return Err(sp_blockchain::Error::Backend(format!(
						"Error decoding body list: {err}",
					)))
				},
			}
		}
		Ok(())
	}
2202
2203	fn empty_state(&self) -> RecordStatsState<RefTrackingState<Block>, Block> {
2204		let root = EmptyStorage::<Block>::new().0; // Empty trie
2205		let db_state = DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
2206			.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache_untrusted()))
2207			.build();
2208		let state = RefTrackingState::new(db_state, self.storage.clone(), None);
2209		RecordStatsState::new(state, None, self.state_usage.clone())
2210	}
2211}
2212
2213fn apply_state_commit(
2214	transaction: &mut Transaction<DbHash>,
2215	commit: sc_state_db::CommitSet<Vec<u8>>,
2216) {
2217	for (key, val) in commit.data.inserted.into_iter() {
2218		transaction.set_from_vec(columns::STATE, &key[..], val);
2219	}
2220	for key in commit.data.deleted.into_iter() {
2221		transaction.remove(columns::STATE, &key[..]);
2222	}
2223	for (key, val) in commit.meta.inserted.into_iter() {
2224		transaction.set_from_vec(columns::STATE_META, &key[..], val);
2225	}
2226	for key in commit.meta.deleted.into_iter() {
2227		transaction.remove(columns::STATE_META, &key[..]);
2228	}
2229}
2230
/// Apply transaction-index operations to `transaction` and return the encoded
/// per-block extrinsic index (`Vec<DbExtrinsic<Block>>`).
///
/// `Insert` ops detach the trailing `size` bytes of an extrinsic into the
/// `TRANSACTION` column; `Renew` ops bump the reference count of already-stored
/// transaction values.
fn apply_index_ops<Block: BlockT>(
	transaction: &mut Transaction<DbHash>,
	body: Vec<Block::Extrinsic>,
	ops: Vec<IndexOperation>,
) -> Vec<u8> {
	let mut extrinsic_index: Vec<DbExtrinsic<Block>> = Vec::with_capacity(body.len());
	// Maps extrinsic position -> (content hash, indexed size).
	let mut index_map = HashMap::new();
	// Maps extrinsic position -> set of transaction hashes to renew.
	let mut renewed_map: HashMap<u32, BTreeSet<DbHash>> = HashMap::new();
	for op in ops {
		match op {
			IndexOperation::Insert { extrinsic, hash, size } => {
				index_map.insert(extrinsic, (hash, size));
			},
			IndexOperation::Renew { extrinsic, hash } => {
				renewed_map
					.entry(extrinsic)
					.or_default()
					.insert(DbHash::from_slice(hash.as_ref()));
			},
		}
	}
	// Counters for the debug summary below.
	let mut n_inserted = 0usize;
	let mut n_renew_slots = 0usize;
	let mut n_renew_hashes = 0usize;
	let mut n_full = 0usize;
	for (index, extrinsic) in body.into_iter().enumerate() {
		let db_extrinsic = if let Some(hashes) = renewed_map.remove(&(index as u32)) {
			n_renew_slots += 1;
			n_renew_hashes += hashes.len();
			let encoded = extrinsic.encode();
			if hashes.len() == 1 {
				// Single renewal: backwards-compatible Indexed variant
				let hash = *hashes.iter().next().expect("len == 1; qed");
				transaction.reference(columns::TRANSACTION, hash);
				DbExtrinsic::Indexed { hash, header: encoded }
			} else {
				// Multi-renewal: bump ref counter for each hash
				for hash in &hashes {
					transaction.reference(columns::TRANSACTION, *hash);
				}
				DbExtrinsic::MultiRenew { hashes, extrinsic: encoded }
			}
		} else {
			match index_map.get(&(index as u32)) {
				Some((hash, size)) => {
					let encoded = extrinsic.encode();
					if *size as usize <= encoded.len() {
						n_inserted += 1;
						// Split the encoding: trailing `size` bytes go to the
						// TRANSACTION column, the leading part stays inline.
						let offset = encoded.len() - *size as usize;
						transaction.store(
							columns::TRANSACTION,
							DbHash::from_slice(hash.as_ref()),
							encoded[offset..].to_vec(),
						);
						DbExtrinsic::Indexed {
							hash: DbHash::from_slice(hash.as_ref()),
							header: encoded[..offset].to_vec(),
						}
					} else {
						// Invalid indexed slice. Just store full data and don't index anything.
						n_full += 1;
						DbExtrinsic::Full(extrinsic)
					}
				},
				_ => {
					n_full += 1;
					DbExtrinsic::Full(extrinsic)
				},
			}
		};
		extrinsic_index.push(db_extrinsic);
	}
	debug!(
		target: "db",
		"DB transaction index: {} inserted, {} slots renewed ({} hashes), {} full",
		n_inserted,
		n_renew_slots,
		n_renew_hashes,
		n_full,
	);
	extrinsic_index.encode()
}
2313
2314fn apply_indexed_body<Block: BlockT>(transaction: &mut Transaction<DbHash>, body: Vec<Vec<u8>>) {
2315	for extrinsic in body {
2316		let hash = sp_runtime::traits::BlakeTwo256::hash(&extrinsic);
2317		transaction.store(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()), extrinsic);
2318	}
2319}
2320
2321impl<Block> sc_client_api::backend::AuxStore for Backend<Block>
2322where
2323	Block: BlockT,
2324{
2325	fn insert_aux<
2326		'a,
2327		'b: 'a,
2328		'c: 'a,
2329		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
2330		D: IntoIterator<Item = &'a &'b [u8]>,
2331	>(
2332		&self,
2333		insert: I,
2334		delete: D,
2335	) -> ClientResult<()> {
2336		let mut transaction = Transaction::new();
2337		for (k, v) in insert {
2338			transaction.set(columns::AUX, k, v);
2339		}
2340		for k in delete {
2341			transaction.remove(columns::AUX, k);
2342		}
2343		self.storage.db.commit(transaction)?;
2344		Ok(())
2345	}
2346
2347	fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
2348		Ok(self.storage.db.get(columns::AUX, key))
2349	}
2350}
2351
2352impl<Block: BlockT> sc_client_api::backend::Backend<Block> for Backend<Block> {
2353	type BlockImportOperation = BlockImportOperation<Block>;
2354	type Blockchain = BlockchainDb<Block>;
2355	type State = RecordStatsState<RefTrackingState<Block>, Block>;
2356	type OffchainStorage = offchain::LocalStorage;
2357
2358	fn begin_operation(&self) -> ClientResult<Self::BlockImportOperation> {
2359		Ok(BlockImportOperation {
2360			pending_block: None,
2361			old_state: self.empty_state(),
2362			db_updates: PrefixedMemoryDB::default(),
2363			storage_updates: Default::default(),
2364			child_storage_updates: Default::default(),
2365			offchain_storage_updates: Default::default(),
2366			aux_ops: Vec::new(),
2367			finalized_blocks: Vec::new(),
2368			set_head: None,
2369			commit_state: false,
2370			create_gap: true,
2371			reset_storage: false,
2372			index_ops: Default::default(),
2373		})
2374	}
2375
2376	fn begin_state_operation(
2377		&self,
2378		operation: &mut Self::BlockImportOperation,
2379		block: Block::Hash,
2380	) -> ClientResult<()> {
2381		if block == Default::default() {
2382			operation.old_state = self.empty_state();
2383		} else {
2384			operation.old_state = self.state_at(block, TrieCacheContext::Untrusted)?;
2385		}
2386
2387		operation.commit_state = true;
2388		Ok(())
2389	}
2390
2391	fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> {
2392		let usage = operation.old_state.usage_info();
2393		self.state_usage.merge_sm(usage);
2394
2395		if let Err(e) = self.try_commit_operation(operation) {
2396			let state_meta_db = StateMetaDb(self.storage.db.clone());
2397			self.storage
2398				.state_db
2399				.reset(state_meta_db)
2400				.map_err(sp_blockchain::Error::from_state_db)?;
2401			self.blockchain.clear_pinning_cache();
2402			Err(e)
2403		} else {
2404			self.storage.state_db.sync();
2405			Ok(())
2406		}
2407	}
2408
2409	fn finalize_block(
2410		&self,
2411		hash: Block::Hash,
2412		justification: Option<Justification>,
2413	) -> ClientResult<()> {
2414		let mut transaction = Transaction::new();
2415		let header = self.blockchain.expect_header(hash)?;
2416
2417		let mut current_transaction_justifications = HashMap::new();
2418		let m = self.finalize_block_with_transaction(
2419			&mut transaction,
2420			hash,
2421			&header,
2422			None,
2423			justification,
2424			&mut current_transaction_justifications,
2425			true,
2426		)?;
2427
2428		self.storage.db.commit(transaction)?;
2429		self.blockchain.update_meta(m);
2430		Ok(())
2431	}
2432
	/// Append an additional justification to an already finalized block.
	///
	/// Errors with `NotInFinalizedChain` if the block is not part of the
	/// finalized chain, or `BadJustification` if a justification for the same
	/// consensus engine is already stored.
	fn append_justification(
		&self,
		hash: Block::Hash,
		justification: Justification,
	) -> ClientResult<()> {
		let mut transaction: Transaction<DbHash> = Transaction::new();
		let header = self.blockchain.expect_header(hash)?;
		let number = *header.number();

		// Check if the block is finalized first.
		let is_descendent_of = is_descendent_of(&self.blockchain, None);
		let last_finalized = self.blockchain.last_finalized()?;

		// We can do a quick check first, before doing a proper but more expensive check
		if number > self.blockchain.info().finalized_number ||
			(hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?)
		{
			return Err(ClientError::NotInFinalizedChain);
		}

		// Merge with any justifications already stored for this block; appending
		// fails if one exists for the same consensus engine id.
		let justifications = if let Some(mut stored_justifications) =
			self.blockchain.justifications(hash)?
		{
			if !stored_justifications.append(justification) {
				return Err(ClientError::BadJustification("Duplicate consensus engine ID".into()));
			}
			stored_justifications
		} else {
			Justifications::from(justification)
		};

		transaction.set_from_vec(
			columns::JUSTIFICATIONS,
			&utils::number_and_hash_to_lookup_key(number, hash)?,
			justifications.encode(),
		);

		self.storage.db.commit(transaction)?;

		Ok(())
	}
2474
	/// This backend always provides a local offchain storage instance.
	fn offchain_storage(&self) -> Option<Self::OffchainStorage> {
		Some(self.offchain_storage.clone())
	}
2478
	/// Collect memory and I/O usage statistics for this backend.
	fn usage_info(&self) -> Option<UsageInfo> {
		let (io_stats, state_stats) = self.io_stats.take_or_else(|| {
			(
				// TODO: implement DB stats and cache size retrieval
				kvdb::IoStats::empty(),
				self.state_usage.take(),
			)
		});
		// No database-level cache statistics are available yet (see TODO above).
		let database_cache = MemorySize::from_bytes(0);
		let state_cache = MemorySize::from_bytes(
			self.shared_trie_cache.as_ref().map_or(0, |c| c.used_memory_size()),
		);

		Some(UsageInfo {
			memory: MemoryInfo { state_cache, database_cache },
			io: IoInfo {
				transactions: io_stats.transactions,
				bytes_read: io_stats.bytes_read,
				bytes_written: io_stats.bytes_written,
				writes: io_stats.writes,
				reads: io_stats.reads,
				average_transaction_size: io_stats.avg_transaction_size() as u64,
				state_reads: state_stats.reads.ops,
				state_writes: state_stats.writes.ops,
				state_writes_cache: state_stats.overlay_writes.ops,
				state_reads_cache: state_stats.cache_reads.ops,
				state_writes_nodes: state_stats.nodes_writes.ops,
			},
		})
	}
2509
	/// Revert up to `n` blocks from the best chain, returning the number of
	/// blocks actually reverted and the set of reverted finalized block hashes.
	///
	/// Unless `revert_finalized` is set, only non-finalized blocks are
	/// revertible. Reverting stops early if state for the previous block is
	/// unavailable or the state-db has nothing left to revert.
	fn revert(
		&self,
		n: NumberFor<Block>,
		revert_finalized: bool,
	) -> ClientResult<(NumberFor<Block>, HashSet<Block::Hash>)> {
		let mut reverted_finalized = HashSet::new();

		let info = self.blockchain.info();

		// Start reverting from the highest leaf if one exists above the best block.
		let highest_leaf = self
			.blockchain
			.leaves
			.read()
			.highest_leaf()
			.and_then(|(n, h)| h.last().map(|h| (n, *h)));

		let best_number = info.best_number;
		let best_hash = info.best_hash;

		let finalized = info.finalized_number;

		// Cap `n` at the number of non-finalized blocks unless finalized blocks
		// may be reverted too.
		let revertible = best_number - finalized;
		let n = if !revert_finalized && revertible < n { revertible } else { n };

		let (n, mut number_to_revert, mut hash_to_revert) = match highest_leaf {
			Some((l_n, l_h)) => (n + (l_n - best_number), l_n, l_h),
			None => (n, best_number, best_hash),
		};

		let mut revert_blocks = || -> ClientResult<NumberFor<Block>> {
			for c in 0..n.saturated_into::<u64>() {
				if number_to_revert.is_zero() {
					return Ok(c.saturated_into::<NumberFor<Block>>());
				}
				let mut transaction = Transaction::new();
				let removed = self.blockchain.header(hash_to_revert)?.ok_or_else(|| {
					sp_blockchain::Error::UnknownBlock(format!(
						"Error reverting to {hash_to_revert}. Block header not found.",
					))
				})?;
				let removed_hash = hash_to_revert;

				let prev_number = number_to_revert.saturating_sub(One::one());
				let prev_hash =
					if prev_number == best_number { best_hash } else { *removed.parent_hash() };

				// Cannot revert past a block whose state is gone.
				if !self.have_state_at(prev_hash, prev_number) {
					return Ok(c.saturated_into::<NumberFor<Block>>());
				}

				match self.storage.state_db.revert_one() {
					Some(commit) => {
						apply_state_commit(&mut transaction, commit);

						number_to_revert = prev_number;
						hash_to_revert = prev_hash;

						let update_finalized = number_to_revert < finalized;

						let key = utils::number_and_hash_to_lookup_key(
							number_to_revert,
							&hash_to_revert,
						)?;
						if update_finalized {
							// We reverted below the finalized block: move the
							// finalized-block pointer (and possibly the
							// finalized-state pointer) back as well.
							transaction.set_from_vec(
								columns::META,
								meta_keys::FINALIZED_BLOCK,
								key.clone(),
							);

							reverted_finalized.insert(removed_hash);
							if let Some((hash, _)) = self.blockchain.info().finalized_state {
								if hash == hash_to_revert {
									if !number_to_revert.is_zero() &&
										self.have_state_at(prev_hash, prev_number)
									{
										let lookup_key = utils::number_and_hash_to_lookup_key(
											prev_number,
											prev_hash,
										)?;
										transaction.set_from_vec(
											columns::META,
											meta_keys::FINALIZED_STATE,
											lookup_key,
										);
									} else {
										transaction
											.remove(columns::META, meta_keys::FINALIZED_STATE);
									}
								}
							}
						}

						transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key);
						transaction.remove(columns::KEY_LOOKUP, removed_hash.as_ref());
						children::remove_children(
							&mut transaction,
							columns::META,
							meta_keys::CHILDREN_PREFIX,
							hash_to_revert,
						);
						self.prune_block(&mut transaction, BlockId::Hash(removed_hash))?;
						remove_from_db::<Block>(
							&mut transaction,
							&*self.storage.db,
							columns::KEY_LOOKUP,
							columns::HEADER,
							BlockId::Hash(removed_hash),
						)?;

						// One atomic commit per reverted block.
						self.storage.db.commit(transaction)?;

						// Clean the cache
						self.blockchain.remove_header_metadata(removed_hash);

						let is_best = number_to_revert < best_number;

						self.blockchain.update_meta(MetaUpdate {
							hash: hash_to_revert,
							number: number_to_revert,
							is_best,
							is_finalized: update_finalized,
							with_state: false,
						});
					},
					None => return Ok(c.saturated_into::<NumberFor<Block>>()),
				}
			}

			Ok(n)
		};

		let reverted = revert_blocks()?;

		// Drop all leaves above the revert point and their database entries.
		let revert_leaves = || -> ClientResult<()> {
			let mut transaction = Transaction::new();
			let mut leaves = self.blockchain.leaves.write();

			leaves.revert(hash_to_revert, number_to_revert).into_iter().try_for_each(
				|(h, _)| {
					self.blockchain.remove_header_metadata(h);
					transaction.remove(columns::KEY_LOOKUP, h.as_ref());

					self.prune_block(&mut transaction, BlockId::Hash(h))?;
					remove_from_db::<Block>(
						&mut transaction,
						&*self.storage.db,
						columns::KEY_LOOKUP,
						columns::HEADER,
						BlockId::Hash(h),
					)?;

					Ok::<_, ClientError>(())
				},
			)?;
			leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
			self.storage.db.commit(transaction)?;

			Ok(())
		};

		revert_leaves()?;

		Ok((reverted, reverted_finalized))
	}
2675
	/// Remove a single leaf block (with available state) from the database.
	///
	/// Refuses to remove the current best block or any non-leaf block.
	fn remove_leaf_block(&self, hash: Block::Hash) -> ClientResult<()> {
		let best_hash = self.blockchain.info().best_hash;

		if best_hash == hash {
			return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {hash:?}")));
		}

		let hdr = self.blockchain.header_metadata(hash)?;
		if !self.have_state_at(hash, hdr.number) {
			return Err(sp_blockchain::Error::UnknownBlock(format!(
				"State already discarded for {hash:?}",
			)));
		}

		let mut leaves = self.blockchain.leaves.write();
		if !leaves.contains(hdr.number, hash) {
			return Err(sp_blockchain::Error::Backend(format!(
				"Can't remove non-leaf block {hash:?}",
			)));
		}

		let mut transaction = Transaction::new();
		if let Some(commit) = self.storage.state_db.remove(&hash) {
			apply_state_commit(&mut transaction, commit);
		}
		transaction.remove(columns::KEY_LOOKUP, hash.as_ref());

		// Remove this block from its parent's children list. If it was the only
		// child, the parent becomes a leaf again.
		let children: Vec<_> = self
			.blockchain()
			.children(hdr.parent)?
			.into_iter()
			.filter(|child_hash| *child_hash != hash)
			.collect();
		let parent_leaf = if children.is_empty() {
			children::remove_children(
				&mut transaction,
				columns::META,
				meta_keys::CHILDREN_PREFIX,
				hdr.parent,
			);
			Some(hdr.parent)
		} else {
			children::write_children(
				&mut transaction,
				columns::META,
				meta_keys::CHILDREN_PREFIX,
				hdr.parent,
				children,
			);
			None
		};

		// If the commit fails, undo the in-memory leaf removal so the leaf set
		// stays consistent with what is actually stored on disk.
		let remove_outcome = leaves.remove(hash, hdr.number, parent_leaf);
		leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
		if let Err(e) = self.storage.db.commit(transaction) {
			if let Some(outcome) = remove_outcome {
				leaves.undo().undo_remove(outcome);
			}
			return Err(e.into());
		}
		self.blockchain().remove_header_metadata(hash);
		Ok(())
	}
2739
	/// Accessor for the underlying blockchain database.
	fn blockchain(&self) -> &BlockchainDb<Block> {
		&self.blockchain
	}
2743
	/// Create a state backend at `hash`, pinning the state in the state-db so
	/// it is not pruned while in use.
	fn state_at(
		&self,
		hash: Block::Hash,
		trie_cache_context: TrieCacheContext,
	) -> ClientResult<Self::State> {
		// Special case for genesis: a dedicated genesis state may be kept in
		// memory before the first full commit.
		if hash == self.blockchain.meta.read().genesis_hash {
			if let Some(genesis_state) = &*self.genesis_state.read() {
				let root = genesis_state.root;
				let db_state =
					DbStateBuilder::<HashingFor<Block>>::new(genesis_state.clone(), root)
						.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| {
							if matches!(trie_cache_context, TrieCacheContext::Trusted) {
								c.local_cache_trusted()
							} else {
								c.local_cache_untrusted()
							}
						}))
						.build();

				let state = RefTrackingState::new(db_state, self.storage.clone(), None);
				return Ok(RecordStatsState::new(state, None, self.state_usage.clone()));
			}
		}

		match self.blockchain.header_metadata(hash) {
			Ok(ref hdr) => {
				// Hint for the state-db: is the state root present on disk?
				let hint = || {
					sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref())
						.unwrap_or(None)
						.is_some()
				};

				// Pinning fails if the state was already pruned.
				if let Ok(()) =
					self.storage.state_db.pin(&hash, hdr.number.saturated_into::<u64>(), hint)
				{
					let root = hdr.state_root;
					let db_state =
						DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
							.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| {
								if matches!(trie_cache_context, TrieCacheContext::Trusted) {
									c.local_cache_trusted()
								} else {
									c.local_cache_untrusted()
								}
							}))
							.build();
					let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash));
					Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone()))
				} else {
					Err(sp_blockchain::Error::UnknownBlock(format!(
						"State already discarded for {hash:?}",
					)))
				}
			},
			Err(e) => Err(e),
		}
	}
2801
2802	fn have_state_at(&self, hash: Block::Hash, number: NumberFor<Block>) -> bool {
2803		if self.is_archive {
2804			match self.blockchain.header_metadata(hash) {
2805				Ok(header) => sp_state_machine::Storage::get(
2806					self.storage.as_ref(),
2807					&header.state_root,
2808					(&[], None),
2809				)
2810				.unwrap_or(None)
2811				.is_some(),
2812				_ => false,
2813			}
2814		} else {
2815			match self.storage.state_db.is_pruned(&hash, number.saturated_into::<u64>()) {
2816				IsPruned::Pruned => false,
2817				IsPruned::NotPruned => true,
2818				IsPruned::MaybePruned => match self.blockchain.header_metadata(hash) {
2819					Ok(header) => sp_state_machine::Storage::get(
2820						self.storage.as_ref(),
2821						&header.state_root,
2822						(&[], None),
2823					)
2824					.unwrap_or(None)
2825					.is_some(),
2826					_ => false,
2827				},
2828			}
2829		}
2830	}
2831
	/// Shared lock that callers acquire around block import operations.
	fn get_import_lock(&self) -> &RwLock<()> {
		&self.import_lock
	}
2835
	fn requires_full_sync(&self) -> bool {
		// Archive pruning modes keep the state of every (or every canonical) block,
		// which requires importing and executing the whole chain rather than a
		// state-only (fast/warp) sync.
		matches!(
			self.storage.state_db.pruning_mode(),
			PruningMode::ArchiveAll | PruningMode::ArchiveCanonical
		)
	}
2842
2843	fn pin_block(&self, hash: <Block as BlockT>::Hash) -> sp_blockchain::Result<()> {
2844		let hint = || {
2845			let header_metadata = self.blockchain.header_metadata(hash);
2846			header_metadata
2847				.map(|hdr| {
2848					sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref())
2849						.unwrap_or(None)
2850						.is_some()
2851				})
2852				.unwrap_or(false)
2853		};
2854
2855		if let Some(number) = self.blockchain.number(hash)? {
2856			self.storage.state_db.pin(&hash, number.saturated_into::<u64>(), hint).map_err(
2857				|_| {
2858					sp_blockchain::Error::UnknownBlock(format!(
2859						"Unable to pin: state already discarded for `{hash:?}`",
2860					))
2861				},
2862			)?;
2863		} else {
2864			return Err(ClientError::UnknownBlock(format!(
2865				"Can not pin block with hash `{hash:?}`. Block not found.",
2866			)));
2867		}
2868
2869		if self.blocks_pruning != BlocksPruning::KeepAll {
2870			// Only increase reference count for this hash. Value is loaded once we prune.
2871			self.blockchain.bump_ref(hash);
2872		}
2873		Ok(())
2874	}
2875
	fn unpin_block(&self, hash: <Block as BlockT>::Hash) {
		// Release the state pin taken in `pin_block`.
		self.storage.state_db.unpin(&hash);

		// `pin_block` also bumped the block reference count when pruning is enabled,
		// so drop that reference here as well.
		if self.blocks_pruning != BlocksPruning::KeepAll {
			self.blockchain.unpin(hash);
		}
	}
2883}
2884
// Marker impl: this database-backed backend keeps all data on the local node
// (see the `LocalBackend` trait docs for the exact contract).
impl<Block: BlockT> sc_client_api::backend::LocalBackend<Block> for Backend<Block> {}
2886
2887#[cfg(test)]
2888pub(crate) mod tests {
2889	use super::*;
2890	use crate::{columns, utils::number_and_hash_to_lookup_key};
2891	use hash_db::{HashDB, EMPTY_PREFIX};
2892	use sc_client_api::{
2893		backend::{Backend as BTrait, BlockImportOperation as Op},
2894		blockchain::Backend as BLBTrait,
2895	};
2896	use sp_blockchain::{lowest_common_ancestor, tree_route};
2897	use sp_core::H256;
2898	use sp_runtime::{
2899		testing::{Block as RawBlock, Header, MockCallU64, TestXt},
2900		traits::{BlakeTwo256, Hash},
2901		ConsensusEngineId, StateVersion,
2902	};
2903
	// Consensus engine ids used by tests elsewhere in this module (their uses are not
	// visible in this chunk).
	const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0";
	const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1";

	// Concrete extrinsic and block types shared by every test in this module.
	type UncheckedXt = TestXt<MockCallU64, ()>;
	pub(crate) type Block = RawBlock<UncheckedXt>;
2909
2910	pub fn insert_header(
2911		backend: &Backend<Block>,
2912		number: u64,
2913		parent_hash: H256,
2914		changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
2915		extrinsics_root: H256,
2916	) -> H256 {
2917		insert_block(backend, number, parent_hash, changes, extrinsics_root, Vec::new(), None)
2918			.unwrap()
2919	}
2920
2921	pub fn insert_block(
2922		backend: &Backend<Block>,
2923		number: u64,
2924		parent_hash: H256,
2925		_changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
2926		extrinsics_root: H256,
2927		body: Vec<UncheckedXt>,
2928		transaction_index: Option<Vec<IndexOperation>>,
2929	) -> Result<H256, sp_blockchain::Error> {
2930		use sp_runtime::testing::Digest;
2931
2932		let digest = Digest::default();
2933		let mut header =
2934			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2935
2936		let block_hash = if number == 0 { Default::default() } else { parent_hash };
2937		let mut op = backend.begin_operation().unwrap();
2938		backend.begin_state_operation(&mut op, block_hash).unwrap();
2939		if let Some(index) = transaction_index {
2940			op.update_transaction_index(index).unwrap();
2941		}
2942
2943		// Insert some fake data to ensure that the block can be found in the state column.
2944		let (root, overlay) = op.old_state.storage_root(
2945			vec![(block_hash.as_ref(), Some(block_hash.as_ref()))].into_iter(),
2946			StateVersion::V1,
2947		);
2948		op.update_db_storage(overlay).unwrap();
2949		header.state_root = root.into();
2950
2951		op.set_block_data(header.clone(), Some(body), None, None, NewBlockState::Best, true)
2952			.unwrap();
2953
2954		backend.commit_operation(op)?;
2955
2956		Ok(header.hash())
2957	}
2958
2959	pub fn insert_disconnected_header(
2960		backend: &Backend<Block>,
2961		number: u64,
2962		parent_hash: H256,
2963		extrinsics_root: H256,
2964		best: bool,
2965	) -> H256 {
2966		use sp_runtime::testing::Digest;
2967
2968		let digest = Digest::default();
2969		let header =
2970			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2971
2972		let mut op = backend.begin_operation().unwrap();
2973
2974		op.set_block_data(
2975			header.clone(),
2976			Some(vec![]),
2977			None,
2978			None,
2979			if best { NewBlockState::Best } else { NewBlockState::Normal },
2980			true,
2981		)
2982		.unwrap();
2983
2984		backend.commit_operation(op).unwrap();
2985
2986		header.hash()
2987	}
2988
2989	pub fn insert_header_no_head(
2990		backend: &Backend<Block>,
2991		number: u64,
2992		parent_hash: H256,
2993		extrinsics_root: H256,
2994	) -> H256 {
2995		use sp_runtime::testing::Digest;
2996
2997		let digest = Digest::default();
2998		let mut header =
2999			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
3000		let mut op = backend.begin_operation().unwrap();
3001
3002		let root = backend
3003			.state_at(parent_hash, TrieCacheContext::Untrusted)
3004			.unwrap_or_else(|_| {
3005				if parent_hash == Default::default() {
3006					backend.empty_state()
3007				} else {
3008					panic!("Unknown block: {parent_hash:?}")
3009				}
3010			})
3011			.storage_root(
3012				vec![(parent_hash.as_ref(), Some(parent_hash.as_ref()))].into_iter(),
3013				StateVersion::V1,
3014			)
3015			.0;
3016		header.state_root = root.into();
3017
3018		op.set_block_data(header.clone(), None, None, None, NewBlockState::Normal, true)
3019			.unwrap();
3020		backend.commit_operation(op).unwrap();
3021
3022		header.hash()
3023	}
3024
	#[test]
	fn block_hash_inserted_correctly() {
		// Build a 10-block chain in a throwaway backend, keep only its raw database, then
		// reopen a fresh backend on that database and verify the number->hash mappings
		// (and best number) survived the round trip.
		let backing = {
			let db = Backend::<Block>::new_test(1, 0);
			for i in 0..10 {
				// No mapping must exist before the block at height `i` is inserted.
				assert!(db.blockchain().hash(i).unwrap().is_none());

				{
					// Chain each block to the previously inserted one; genesis has the
					// default parent hash.
					let hash = if i == 0 {
						Default::default()
					} else {
						db.blockchain.hash(i - 1).unwrap().unwrap()
					};

					let mut op = db.begin_operation().unwrap();
					db.begin_state_operation(&mut op, hash).unwrap();
					let header = Header {
						number: i,
						parent_hash: hash,
						state_root: Default::default(),
						digest: Default::default(),
						extrinsics_root: Default::default(),
					};

					op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
						.unwrap();
					db.commit_operation(op).unwrap();
				}

				assert!(db.blockchain().hash(i).unwrap().is_some())
			}
			// Hand the underlying database over to the second backend below.
			db.storage.db.clone()
		};

		let backend = Backend::<Block>::new(
			DatabaseSettings {
				trie_cache_maximum_size: Some(16 * 1024 * 1024),
				state_pruning: Some(PruningMode::blocks_pruning(1)),
				source: DatabaseSource::Custom { db: backing, require_create_flag: false },
				blocks_pruning: BlocksPruning::KeepFinalized,
				pruning_filters: Default::default(),
				metrics_registry: None,
			},
			0,
		)
		.unwrap();
		// Everything written by the first backend must be readable after reopening.
		assert_eq!(backend.blockchain().info().best_number, 9);
		for i in 0..10 {
			assert!(backend.blockchain().hash(i).unwrap().is_some())
		}
	}
3076
3077	#[test]
3078	fn set_state_data() {
3079		set_state_data_inner(StateVersion::V0);
3080		set_state_data_inner(StateVersion::V1);
3081	}
	// Writes genesis state via `reset_storage`, then applies an update on top of it via
	// `update_storage`/`update_db_storage`, checking after each commit that reads through
	// `state_at` observe exactly the expected key/value pairs.
	fn set_state_data_inner(state_version: StateVersion) {
		let db = Backend::<Block>::new_test(2, 0);
		let hash = {
			// --- Genesis: seed the state with two keys via `reset_storage`. ---
			let mut op = db.begin_operation().unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(vec![1, 3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9])];

			// The header must carry the root that the storage below will hash to.
			header.state_root = op
				.old_state
				.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.reset_storage(
				Storage {
					top: storage.into_iter().collect(),
					children_default: Default::default(),
				},
				state_version,
			)
			.unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			db.commit_operation(op).unwrap();

			let state = db.state_at(hash, TrieCacheContext::Untrusted).unwrap();

			// Both seeded keys are readable; an unrelated key is absent.
			assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6]));
			assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
			assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None);

			hash
		};

		{
			// --- Block 1: delete [1,3,5] and insert [5,5,5] on top of genesis. ---
			let mut op = db.begin_operation().unwrap();
			db.begin_state_operation(&mut op, hash).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			// `None` means deletion, `Some` means insert/overwrite.
			let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];

			let (root, overlay) = op.old_state.storage_root(
				storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
				state_version,
			);
			op.update_db_storage(overlay).unwrap();
			header.state_root = root.into();

			op.update_storage(storage, Vec::new()).unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			db.commit_operation(op).unwrap();

			let state = db.state_at(header.hash(), TrieCacheContext::Untrusted).unwrap();

			// The deletion and the insertion are visible; the untouched key persists.
			assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None);
			assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
			assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6]));
		}
	}
3158
	#[test]
	fn delete_only_when_negative_rc() {
		// Checks reference counting of state-column values: a value inserted by several
		// blocks must only be physically deleted once the removals outnumber the inserts
		// and the relevant blocks get pruned (pruning window of 1 here).
		sp_tracing::try_init_simple();
		let state_version = StateVersion::default();
		let key;
		let backend = Backend::<Block>::new_test(1, 0);

		let hash = {
			// --- Block 0: insert the value "hello" (refcount becomes 1). ---
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, Default::default()).unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			header.state_root =
				op.old_state.storage_root(std::iter::empty(), state_version).0.into();
			let hash = header.hash();

			op.reset_storage(
				Storage { top: Default::default(), children_default: Default::default() },
				state_version,
			)
			.unwrap();

			key = op.db_updates.insert(EMPTY_PREFIX, b"hello");
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			// The value is readable directly from the state column.
			assert_eq!(
				backend
					.storage
					.db
					.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
					.unwrap(),
				&b"hello"[..]
			);
			hash
		};

		let hashof1 = {
			// --- Block 1: insert the same value again and remove it once; the net
			// reference count stays positive, so the value must survive. ---
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hash).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.db_updates.insert(EMPTY_PREFIX, b"hello");
			op.db_updates.remove(&key, EMPTY_PREFIX);
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			assert_eq!(
				backend
					.storage
					.db
					.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
					.unwrap(),
				&b"hello"[..]
			);
			hash
		};

		let hashof2 = {
			// --- Block 2: remove the value again; the deletion only takes effect once
			// this block leaves the pruning window, so it is still readable here. ---
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof1).unwrap();
			let mut header = Header {
				number: 2,
				parent_hash: hashof1,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.db_updates.remove(&key, EMPTY_PREFIX);
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			assert!(backend
				.storage
				.db
				.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
				.is_some());
			hash
		};

		let hashof3 = {
			// --- Block 3: no state changes; advances the chain so earlier blocks can
			// be pruned. ---
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof2).unwrap();
			let mut header = Header {
				number: 3,
				parent_hash: hashof2,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			hash
		};

		let hashof4 = {
			// --- Block 4: by now the final removal has been pruned in, so the value is
			// expected to be gone from the state column. ---
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof3).unwrap();
			let mut header = Header {
				number: 4,
				parent_hash: hashof3,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			assert!(backend
				.storage
				.db
				.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
				.is_none());
			hash
		};

		// Finalizing the whole chain must not resurrect the deleted value.
		backend.finalize_block(hashof1, None).unwrap();
		backend.finalize_block(hashof2, None).unwrap();
		backend.finalize_block(hashof3, None).unwrap();
		backend.finalize_block(hashof4, None).unwrap();
		assert!(backend
			.storage
			.db
			.get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
			.is_none());
	}
3343
	#[test]
	fn tree_route_works() {
		// Exercises `tree_route` over a forked chain: same-block routes, cross-fork
		// routes, and ancestor/descendant routes in both directions.
		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();
		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());

		// fork from genesis: 3 prong.
		let a1 = insert_header(&backend, 1, block0, None, Default::default());
		let a2 = insert_header(&backend, 2, a1, None, Default::default());
		let a3 = insert_header(&backend, 3, a2, None, Default::default());

		// fork from genesis: 2 prong.
		let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32]));
		let b2 = insert_header(&backend, 2, b1, None, Default::default());

		{
			// A route from a block to itself is empty.
			let tree_route = tree_route(blockchain, a1, a1).unwrap();

			assert_eq!(tree_route.common_block().hash, a1);
			assert!(tree_route.retracted().is_empty());
			assert!(tree_route.enacted().is_empty());
		}

		{
			// Cross-fork route: retract the `a` branch back to genesis, enact `b`.
			let tree_route = tree_route(blockchain, a3, b2).unwrap();

			assert_eq!(tree_route.common_block().hash, block0);
			assert_eq!(
				tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![a3, a2, a1]
			);
			assert_eq!(
				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![b1, b2]
			);
		}

		{
			// Ancestor -> descendant: nothing retracted, descendants enacted in order.
			let tree_route = tree_route(blockchain, a1, a3).unwrap();

			assert_eq!(tree_route.common_block().hash, a1);
			assert!(tree_route.retracted().is_empty());
			assert_eq!(
				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![a2, a3]
			);
		}

		{
			// Descendant -> ancestor: the reverse, retractions only.
			let tree_route = tree_route(blockchain, a3, a1).unwrap();

			assert_eq!(tree_route.common_block().hash, a1);
			assert_eq!(
				tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![a3, a2]
			);
			assert!(tree_route.enacted().is_empty());
		}

		{
			// Self-route on a non-tip block is empty as well.
			let tree_route = tree_route(blockchain, a2, a2).unwrap();

			assert_eq!(tree_route.common_block().hash, a2);
			assert!(tree_route.retracted().is_empty());
			assert!(tree_route.enacted().is_empty());
		}
	}
3411
3412	#[test]
3413	fn tree_route_child() {
3414		let backend = Backend::<Block>::new_test(1000, 100);
3415		let blockchain = backend.blockchain();
3416
3417		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3418		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3419
3420		{
3421			let tree_route = tree_route(blockchain, block0, block1).unwrap();
3422
3423			assert_eq!(tree_route.common_block().hash, block0);
3424			assert!(tree_route.retracted().is_empty());
3425			assert_eq!(
3426				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3427				vec![block1]
3428			);
3429		}
3430	}
3431
3432	#[test]
3433	fn lowest_common_ancestor_works() {
3434		let backend = Backend::<Block>::new_test(1000, 100);
3435		let blockchain = backend.blockchain();
3436		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3437
3438		// fork from genesis: 3 prong.
3439		let a1 = insert_header(&backend, 1, block0, None, Default::default());
3440		let a2 = insert_header(&backend, 2, a1, None, Default::default());
3441		let a3 = insert_header(&backend, 3, a2, None, Default::default());
3442
3443		// fork from genesis: 2 prong.
3444		let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32]));
3445		let b2 = insert_header(&backend, 2, b1, None, Default::default());
3446
3447		{
3448			let lca = lowest_common_ancestor(blockchain, a3, b2).unwrap();
3449
3450			assert_eq!(lca.hash, block0);
3451			assert_eq!(lca.number, 0);
3452		}
3453
3454		{
3455			let lca = lowest_common_ancestor(blockchain, a1, a3).unwrap();
3456
3457			assert_eq!(lca.hash, a1);
3458			assert_eq!(lca.number, 1);
3459		}
3460
3461		{
3462			let lca = lowest_common_ancestor(blockchain, a3, a1).unwrap();
3463
3464			assert_eq!(lca.hash, a1);
3465			assert_eq!(lca.number, 1);
3466		}
3467
3468		{
3469			let lca = lowest_common_ancestor(blockchain, a2, a3).unwrap();
3470
3471			assert_eq!(lca.hash, a2);
3472			assert_eq!(lca.number, 2);
3473		}
3474
3475		{
3476			let lca = lowest_common_ancestor(blockchain, a2, a1).unwrap();
3477
3478			assert_eq!(lca.hash, a1);
3479			assert_eq!(lca.number, 1);
3480		}
3481
3482		{
3483			let lca = lowest_common_ancestor(blockchain, a2, a2).unwrap();
3484
3485			assert_eq!(lca.hash, a2);
3486			assert_eq!(lca.number, 2);
3487		}
3488	}
3489
	#[test]
	fn displaced_leaves_after_finalizing_works_with_disconnect() {
		// In this test we will create a situation that can typically happen after warp sync.
		// The situation looks like this:
		// g -> <unimported> -> a3 -> a4
		// Basically there is a gap of unimported blocks at some point in the chain.
		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();
		let genesis_number = 0;
		let genesis_hash =
			insert_header(&backend, genesis_number, Default::default(), None, Default::default());

		// a3's parent (H256::from([200; 32])) is deliberately not in the database.
		let a3_number = 3;
		let a3_hash = insert_disconnected_header(
			&backend,
			a3_number,
			H256::from([200; 32]),
			H256::from([1; 32]),
			true,
		);

		let a4_number = 4;
		let a4_hash =
			insert_disconnected_header(&backend, a4_number, a3_hash, H256::from([2; 32]), true);
		{
			// Finalizing a3 displaces the genesis leaf but no concrete blocks, since the
			// route across the gap cannot be resolved.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([200; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a4_hash, a4_number, a3_hash)
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Import block a1 which has the genesis block as parent.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		let a1_number = 1;
		let a1_hash = insert_disconnected_header(
			&backend,
			a1_number,
			genesis_hash,
			H256::from([123; 32]),
			false,
		);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Import block b1 which has the genesis block as parent.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		//  \-> b1
		let b1_number = 1;
		let b1_hash = insert_disconnected_header(
			&backend,
			b1_number,
			genesis_hash,
			H256::from([124; 32]),
			false,
		);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash, b1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// If branch of b blocks is higher in number than a branch, we
		// should still not prune disconnected leaves.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		//  \-> b1 -> b2 ----------> b3 ----> b4 -> b5
		let b2_number = 2;
		let b2_hash =
			insert_disconnected_header(&backend, b2_number, b1_hash, H256::from([40; 32]), false);
		let b3_number = 3;
		let b3_hash =
			insert_disconnected_header(&backend, b3_number, b2_hash, H256::from([41; 32]), false);
		let b4_number = 4;
		let b4_hash =
			insert_disconnected_header(&backend, b4_number, b3_hash, H256::from([42; 32]), false);
		let b5_number = 5;
		let b5_hash =
			insert_disconnected_header(&backend, b5_number, b4_hash, H256::from([43; 32]), false);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Even though there is a disconnect, displaced-leaf detection should still find
		// branches above the block gap.
		//                              /-> c4
		// g -> a1 -> <unimported> -> a3 -> a4(f)
		//  \-> b1 -> b2 ----------> b3 -> b4 -> b5
		let c4_number = 4;
		let c4_hash =
			insert_disconnected_header(&backend, c4_number, a3_hash, H256::from([44; 32]), false);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a4_hash, a4_number, a3_hash)
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, c4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(c4_number, c4_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![c4_hash]);
		}
	}
3613
	#[test]
	fn disconnected_blocks_do_not_become_leaves_and_warp_sync_scenario() {
		// Simulate a realistic case:
		//
		// 1. Import genesis (block #0) normally — becomes a leaf.
		// 2. Import warp sync proof blocks at #5, #10, #15 without leaf registration. Their parents
		//    are NOT in the DB. They must NOT appear as leaves.
		// 3. Import block #20 as Final. Its parent (#19) is not in the DB. Being Final, it updates
		//    finalized number to 20.
		// 4. Import blocks #1..#19 with Normal state (gap sync). Since last_finalized_num is now 20
		//    and each block number < 20, the leaf condition (number > last_finalized_num ||
		//    last_finalized_num.is_zero()) is FALSE — they must NOT become leaves.
		// 5. Assert throughout and verify displaced_leaves_after_finalizing works cleanly with no
		//    disconnected proof blocks in the displaced list.

		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();

		// Like `insert_disconnected_header`, but additionally allows choosing the block
		// state and whether the block is registered as a leaf.
		let insert_block_raw = |number: u64,
		                        parent_hash: H256,
		                        ext_root: H256,
		                        state: NewBlockState,
		                        register_as_leaf: bool|
		 -> H256 {
			use sp_runtime::testing::Digest;
			let digest = Digest::default();
			let header = Header {
				number,
				parent_hash,
				state_root: Default::default(),
				digest,
				extrinsics_root: ext_root,
			};
			let mut op = backend.begin_operation().unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, state, register_as_leaf)
				.unwrap();
			backend.commit_operation(op).unwrap();
			header.hash()
		};

		// --- Step 1: import genesis ---
		let genesis_hash = insert_header(&backend, 0, Default::default(), None, Default::default());
		assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]);

		// --- Step 2: import warp sync proof blocks without leaf registration ---
		// These simulate authority-set-change blocks from the warp sync proof.
		// Their parents are NOT in the DB.
		let _proof5_hash = insert_block_raw(
			5,
			H256::from([5; 32]),
			H256::from([50; 32]),
			NewBlockState::Normal,
			false,
		);
		let _proof10_hash = insert_block_raw(
			10,
			H256::from([10; 32]),
			H256::from([100; 32]),
			NewBlockState::Normal,
			false,
		);
		let _proof15_hash = insert_block_raw(
			15,
			H256::from([15; 32]),
			H256::from([150; 32]),
			NewBlockState::Normal,
			false,
		);

		// Leaves must still only contain genesis.
		assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]);

		// The disconnected blocks should still be retrievable from the DB.
		assert!(blockchain.header(_proof5_hash).unwrap().is_some());
		assert!(blockchain.header(_proof10_hash).unwrap().is_some());
		assert!(blockchain.header(_proof15_hash).unwrap().is_some());

		// --- Step 3: import warp sync target block #20 as Final ---
		// Parent (#19) is not in the DB. Use the same low-level approach but with
		// NewBlockState::Final. Being Final, it will be set as best + finalized.
		let block20_hash = insert_block_raw(
			20,
			H256::from([19; 32]),
			H256::from([200; 32]),
			NewBlockState::Final,
			true,
		);

		// Block #20 should now be a leaf (it's best and finalized).
		let leaves = blockchain.leaves().unwrap();
		assert!(leaves.contains(&block20_hash));
		// Verify finalized number was updated to 20.
		assert_eq!(blockchain.info().finalized_number, 20);
		assert_eq!(blockchain.info().finalized_hash, block20_hash);
		// Disconnected proof blocks must still not be leaves.
		assert!(!leaves.contains(&_proof5_hash));
		assert!(!leaves.contains(&_proof10_hash));
		assert!(!leaves.contains(&_proof15_hash));

		// --- Step 4: import gap sync blocks #1..#19 with Normal state ---
		// Since last_finalized_num is 20, each block with number < 20 should NOT
		// become a leaf (the condition `number > last_finalized_num` is false).
		// Build the chain: genesis -> #1 -> #2 -> ... -> #19.
		let mut prev_hash = genesis_hash;
		let mut gap_hashes = Vec::new();
		for n in 1..=19 {
			let h = insert_disconnected_header(&backend, n, prev_hash, Default::default(), false);
			gap_hashes.push(h);
			prev_hash = h;
		}

		// Verify gap sync blocks did NOT create new leaves.
		let leaves = blockchain.leaves().unwrap();
		for (i, gap_hash) in gap_hashes.iter().enumerate() {
			assert!(
				!leaves.contains(gap_hash),
				"Gap sync block #{} should not be a leaf, but it is",
				i + 1,
			);
		}
		// Block #20 should still be a leaf.
		assert!(leaves.contains(&block20_hash));
		// Disconnected proof blocks must still not be leaves.
		assert!(!leaves.contains(&_proof5_hash));
		assert!(!leaves.contains(&_proof10_hash));
		assert!(!leaves.contains(&_proof15_hash));

		// --- Step 5: verify displaced_leaves_after_finalizing works cleanly ---
		// Call it for block #20 to verify no disconnected proof blocks appear
		// in the displaced list and it completes without errors.
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(
					block20_hash,
					20,
					H256::from([19; 32]), // parent hash of block #20
				)
				.unwrap();
			// Disconnected proof blocks were never leaves, so they must not
			// appear in displaced_leaves.
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof5_hash),);
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof10_hash),);
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof15_hash),);
			// None of the gap sync blocks should be displaced leaves either
			// (they were never added as leaves).
			for gap_hash in &gap_hashes {
				assert!(!displaced.displaced_leaves.iter().any(|(_, h)| h == gap_hash),);
			}
		}
	}
3764
	#[test]
	fn displaced_leaves_after_finalizing_works() {
		// Exercises `displaced_leaves_after_finalizing` over a multi-fork tree:
		// leaves and whole branches that can no longer end up on the canonical
		// chain after finalization must be reported as displaced.
		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();
		let genesis_number = 0;
		let genesis_hash =
			insert_header(&backend, genesis_number, Default::default(), None, Default::default());

		// fork from genesis: 3 prong.
		// block 0 -> a1 -> a2 -> a3
		//        \
		//         -> b1 -> b2 -> c1 -> c2
		//              \
		//               -> d1 -> d2
		let a1_number = 1;
		let a1_hash = insert_header(&backend, a1_number, genesis_hash, None, Default::default());
		let a2_number = 2;
		let a2_hash = insert_header(&backend, a2_number, a1_hash, None, Default::default());
		let a3_number = 3;
		let a3_hash = insert_header(&backend, a3_number, a2_hash, None, Default::default());

		{
			// Finalizing genesis cannot displace anything.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(genesis_hash, genesis_number, Default::default())
				.unwrap();
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}
		{
			// With only the `a` chain imported, finalizing along that chain
			// displaces nothing either.
			let displaced_a1 = blockchain
				.displaced_leaves_after_finalizing(a1_hash, a1_number, genesis_hash)
				.unwrap();
			assert_eq!(displaced_a1.displaced_leaves, vec![]);
			assert_eq!(displaced_a1.displaced_blocks, vec![]);

			let displaced_a2 = blockchain
				.displaced_leaves_after_finalizing(a2_hash, a2_number, a1_hash)
				.unwrap();
			assert_eq!(displaced_a2.displaced_leaves, vec![]);
			assert_eq!(displaced_a2.displaced_blocks, vec![]);

			let displaced_a3 = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, a2_hash)
				.unwrap();
			assert_eq!(displaced_a3.displaced_leaves, vec![]);
			assert_eq!(displaced_a3.displaced_blocks, vec![]);
		}
		{
			// Finalized block is above leaves and not imported yet.
			// We will not be able to make a connection,
			// nothing can be marked as displaced.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(H256::from([57; 32]), 10, H256::from([56; 32]))
				.unwrap();
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// fork from genesis: 2 prong.
		let b1_number = 1;
		let b1_hash = insert_header(&backend, b1_number, genesis_hash, None, H256::from([1; 32]));
		let b2_number = 2;
		let b2_hash = insert_header(&backend, b2_number, b1_hash, None, Default::default());

		// fork from b2.
		let c1_number = 3;
		let c1_hash = insert_header(&backend, c1_number, b2_hash, None, H256::from([2; 32]));
		let c2_number = 4;
		let c2_hash = insert_header(&backend, c2_number, c1_hash, None, Default::default());

		// fork from b1.
		let d1_number = 2;
		let d1_hash = insert_header(&backend, d1_number, b1_hash, None, H256::from([3; 32]));
		let d2_number = 3;
		let d2_hash = insert_header(&backend, d2_number, d1_hash, None, Default::default());

		{
			// Finalizing on the `a` chain displaces the entire b/c/d subtree:
			// its leaves (c2, d2) and every block on those branches.
			let displaced_a1 = blockchain
				.displaced_leaves_after_finalizing(a1_hash, a1_number, genesis_hash)
				.unwrap();
			assert_eq!(
				displaced_a1.displaced_leaves,
				vec![(c2_number, c2_hash), (d2_number, d2_hash)]
			);
			let mut displaced_blocks = vec![b1_hash, b2_hash, c1_hash, c2_hash, d1_hash, d2_hash];
			displaced_blocks.sort();
			assert_eq!(displaced_a1.displaced_blocks, displaced_blocks);

			// Finalizing further along the `a` chain yields the same result.
			let displaced_a2 = blockchain
				.displaced_leaves_after_finalizing(a2_hash, a2_number, a1_hash)
				.unwrap();
			assert_eq!(displaced_a1.displaced_leaves, displaced_a2.displaced_leaves);
			assert_eq!(displaced_a1.displaced_blocks, displaced_a2.displaced_blocks);

			let displaced_a3 = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, a2_hash)
				.unwrap();
			assert_eq!(displaced_a1.displaced_leaves, displaced_a3.displaced_leaves);
			assert_eq!(displaced_a1.displaced_blocks, displaced_a3.displaced_blocks);
		}
		{
			// Finalizing b1 displaces only the `a` branch; b's descendants
			// (including the c and d forks) remain reachable.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(b1_hash, b1_number, genesis_hash)
				.unwrap();
			assert_eq!(displaced.displaced_leaves, vec![(a3_number, a3_hash)]);
			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash];
			displaced_blocks.sort();
			assert_eq!(displaced.displaced_blocks, displaced_blocks);
		}
		{
			// Finalizing b2 additionally displaces the d fork (rooted at b1).
			let displaced = blockchain
				.displaced_leaves_after_finalizing(b2_hash, b2_number, b1_hash)
				.unwrap();
			assert_eq!(
				displaced.displaced_leaves,
				vec![(a3_number, a3_hash), (d2_number, d2_hash)]
			);
			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash];
			displaced_blocks.sort();
			assert_eq!(displaced.displaced_blocks, displaced_blocks);
		}
		{
			// Finalizing c2 displaces the same set as b2: the `a` branch and
			// the d fork; c2's own ancestors are canonical.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(c2_hash, c2_number, c1_hash)
				.unwrap();
			assert_eq!(
				displaced.displaced_leaves,
				vec![(a3_number, a3_hash), (d2_number, d2_hash)]
			);
			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash];
			displaced_blocks.sort();
			assert_eq!(displaced.displaced_blocks, displaced_blocks);
		}
	}
3899
3900	#[test]
3901	fn test_tree_route_regression() {
3902		// NOTE: this is a test for a regression introduced in #3665, the result
3903		// of tree_route would be erroneously computed, since it was taking into
3904		// account the `ancestor` in `CachedHeaderMetadata` for the comparison.
3905		// in this test we simulate the same behavior with the side-effect
3906		// triggering the issue being eviction of a previously fetched record
3907		// from the cache, therefore this test is dependent on the LRU cache
3908		// size for header metadata, which is currently set to 5000 elements.
3909		let backend = Backend::<Block>::new_test(10000, 10000);
3910		let blockchain = backend.blockchain();
3911
3912		let genesis = insert_header(&backend, 0, Default::default(), None, Default::default());
3913
3914		let block100 = (1..=100).fold(genesis, |parent, n| {
3915			insert_header(&backend, n, parent, None, Default::default())
3916		});
3917
3918		let block7000 = (101..=7000).fold(block100, |parent, n| {
3919			insert_header(&backend, n, parent, None, Default::default())
3920		});
3921
3922		// This will cause the ancestor of `block100` to be set to `genesis` as a side-effect.
3923		lowest_common_ancestor(blockchain, genesis, block100).unwrap();
3924
3925		// While traversing the tree we will have to do 6900 calls to
3926		// `header_metadata`, which will make sure we will exhaust our cache
3927		// which only takes 5000 elements. In particular, the `CachedHeaderMetadata` struct for
3928		// block #100 will be evicted and will get a new value (with ancestor set to its parent).
3929		let tree_route = tree_route(blockchain, block100, block7000).unwrap();
3930
3931		assert!(tree_route.retracted().is_empty());
3932	}
3933
3934	#[test]
3935	fn test_leaves_with_complex_block_tree() {
3936		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3937			Arc::new(Backend::new_test(20, 20));
3938		substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend);
3939	}
3940
3941	#[test]
3942	fn test_children_with_complex_block_tree() {
3943		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3944			Arc::new(Backend::new_test(20, 20));
3945		substrate_test_runtime_client::trait_tests::test_children_for_backend(backend);
3946	}
3947
3948	#[test]
3949	fn test_blockchain_query_by_number_gets_canonical() {
3950		let backend: Arc<Backend<substrate_test_runtime_client::runtime::Block>> =
3951			Arc::new(Backend::new_test(20, 20));
3952		substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(
3953			backend,
3954		);
3955	}
3956
3957	#[test]
3958	fn test_leaves_pruned_on_finality() {
3959		//   / 1b - 2b - 3b
3960		// 0 - 1a - 2a
3961		//   \ 1c
3962		let backend: Backend<Block> = Backend::new_test(10, 10);
3963		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3964
3965		let block1_a = insert_header(&backend, 1, block0, None, Default::default());
3966		let block1_b = insert_header(&backend, 1, block0, None, [1; 32].into());
3967		let block1_c = insert_header(&backend, 1, block0, None, [2; 32].into());
3968
3969		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block1_a, block1_b, block1_c]);
3970
3971		let block2_a = insert_header(&backend, 2, block1_a, None, Default::default());
3972		let block2_b = insert_header(&backend, 2, block1_b, None, Default::default());
3973
3974		let block3_b = insert_header(&backend, 3, block2_b, None, [3; 32].into());
3975
3976		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block3_b, block2_a, block1_c]);
3977
3978		backend.finalize_block(block1_a, None).unwrap();
3979		backend.finalize_block(block2_a, None).unwrap();
3980
3981		// All leaves are pruned that are known to not belong to canonical branch
3982		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
3983	}
3984
3985	#[test]
3986	fn test_aux() {
3987		let backend: Backend<substrate_test_runtime_client::runtime::Block> =
3988			Backend::new_test(0, 0);
3989		assert!(backend.get_aux(b"test").unwrap().is_none());
3990		backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap();
3991		assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]);
3992		backend.insert_aux(&[], &[&b"test"[..]]).unwrap();
3993		assert!(backend.get_aux(b"test").unwrap().is_none());
3994	}
3995
3996	#[test]
3997	fn test_finalize_block_with_justification() {
3998		use sc_client_api::blockchain::Backend as BlockChainBackend;
3999
4000		let backend = Backend::<Block>::new_test(10, 10);
4001
4002		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4003		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4004
4005		let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3]));
4006		backend.finalize_block(block1, justification.clone()).unwrap();
4007
4008		assert_eq!(
4009			backend.blockchain().justifications(block1).unwrap(),
4010			justification.map(Justifications::from),
4011		);
4012	}
4013
4014	#[test]
4015	fn test_append_justification_to_finalized_block() {
4016		use sc_client_api::blockchain::Backend as BlockChainBackend;
4017
4018		let backend = Backend::<Block>::new_test(10, 10);
4019
4020		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4021		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4022
4023		let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]);
4024		backend.finalize_block(block1, Some(just0.clone().into())).unwrap();
4025
4026		let just1 = (CONS1_ENGINE_ID, vec![4, 5]);
4027		backend.append_justification(block1, just1.clone()).unwrap();
4028
4029		let just2 = (CONS1_ENGINE_ID, vec![6, 7]);
4030		assert!(matches!(
4031			backend.append_justification(block1, just2),
4032			Err(ClientError::BadJustification(_))
4033		));
4034
4035		let justifications = {
4036			let mut just = Justifications::from(just0);
4037			just.append(just1);
4038			just
4039		};
4040		assert_eq!(backend.blockchain().justifications(block1).unwrap(), Some(justifications),);
4041	}
4042
4043	#[test]
4044	fn test_finalize_multiple_blocks_in_single_op() {
4045		let backend = Backend::<Block>::new_test(10, 10);
4046
4047		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4048		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4049		let block2 = insert_header(&backend, 2, block1, None, Default::default());
4050		let block3 = insert_header(&backend, 3, block2, None, Default::default());
4051		let block4 = insert_header(&backend, 4, block3, None, Default::default());
4052		{
4053			let mut op = backend.begin_operation().unwrap();
4054			backend.begin_state_operation(&mut op, block0).unwrap();
4055			op.mark_finalized(block1, None).unwrap();
4056			op.mark_finalized(block2, None).unwrap();
4057			backend.commit_operation(op).unwrap();
4058		}
4059		{
4060			let mut op = backend.begin_operation().unwrap();
4061			backend.begin_state_operation(&mut op, block2).unwrap();
4062			op.mark_finalized(block3, None).unwrap();
4063			op.mark_finalized(block4, None).unwrap();
4064			backend.commit_operation(op).unwrap();
4065		}
4066	}
4067
	#[test]
	fn storage_hash_is_cached_correctly() {
		// The storage hash reported for a key must track the block queried:
		// after overwriting `test` in block #1, `storage_hash` at block #1
		// must differ from the value observed (and cached) at block #0.
		let state_version = StateVersion::default();
		let backend = Backend::<Block>::new_test(10, 10);

		// Block #0: genesis with `test => test` in top storage.
		let hash0 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, Default::default()).unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(b"test".to_vec(), b"test".to_vec())];

			// Compute the state root over the initial storage before sealing
			// the header.
			header.state_root = op
				.old_state
				.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.reset_storage(
				Storage {
					top: storage.into_iter().collect(),
					children_default: Default::default(),
				},
				state_version,
			)
			.unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			hash
		};

		// Query the storage hash of `test` at block #0 (this populates any
		// internal caching).
		let block0_hash = backend
			.state_at(hash0, TrieCacheContext::Untrusted)
			.unwrap()
			.storage_hash(&b"test"[..])
			.unwrap();

		// Block #1: overwrite `test` with `test2`; imported as `Normal`
		// (not best) at first.
		let hash1 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hash0).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash0,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))];

			// Derive the new state root and trie changes from the update.
			let (root, overlay) = op.old_state.storage_root(
				storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
				state_version,
			);
			op.update_db_storage(overlay).unwrap();
			header.state_root = root.into();
			let hash = header.hash();

			op.update_storage(storage, Vec::new()).unwrap();
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Normal, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			hash
		};

		// Promote block #1 to best in a separate operation.
		{
			let header = backend.blockchain().header(hash1).unwrap().unwrap();
			let mut op = backend.begin_operation().unwrap();
			op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
			backend.commit_operation(op).unwrap();
		}

		// The hash at block #1 must reflect the new value, not the value
		// cached for block #0.
		let block1_hash = backend
			.state_at(hash1, TrieCacheContext::Untrusted)
			.unwrap()
			.storage_hash(&b"test"[..])
			.unwrap();

		assert_ne!(block0_hash, block1_hash);
	}
4160
4161	#[test]
4162	fn test_finalize_non_sequential() {
4163		let backend = Backend::<Block>::new_test(10, 10);
4164
4165		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4166		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4167		let block2 = insert_header(&backend, 2, block1, None, Default::default());
4168		{
4169			let mut op = backend.begin_operation().unwrap();
4170			backend.begin_state_operation(&mut op, block0).unwrap();
4171			op.mark_finalized(block2, None).unwrap();
4172			backend.commit_operation(op).unwrap_err();
4173		}
4174	}
4175
	#[test]
	fn prune_blocks_on_finalize() {
		// Block bodies must be pruned on finalization under `Some(n)` pruning
		// and retained under `KeepFinalized` / `KeepAll`.
		let pruning_modes =
			vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll];

		for pruning_mode in pruning_modes {
			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 0);
			// Build a 5-block chain, each block carrying one extrinsic tagged
			// with its height.
			let mut blocks = Vec::new();
			let mut prev_hash = Default::default();
			for i in 0..5 {
				let hash = insert_block(
					&backend,
					i,
					prev_hash,
					None,
					Default::default(),
					vec![UncheckedXt::new_transaction(i.into(), ())],
					None,
				)
				.unwrap();
				blocks.push(hash);
				prev_hash = hash;
			}

			// Finalize blocks #1..#4 in a single commit.
			{
				let mut op = backend.begin_operation().unwrap();
				backend.begin_state_operation(&mut op, blocks[4]).unwrap();
				for i in 1..5 {
					op.mark_finalized(blocks[i], None).unwrap();
				}
				backend.commit_operation(op).unwrap();
			}
			let bc = backend.blockchain();

			if matches!(pruning_mode, BlocksPruning::Some(_)) {
				// Window of 2: only the two most recent bodies (#3, #4)
				// survive.
				assert_eq!(None, bc.body(blocks[0]).unwrap());
				assert_eq!(None, bc.body(blocks[1]).unwrap());
				assert_eq!(None, bc.body(blocks[2]).unwrap());
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
					bc.body(blocks[3]).unwrap()
				);
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
					bc.body(blocks[4]).unwrap()
				);
			} else {
				// Keep modes: every body is still available.
				for i in 0..5 {
					assert_eq!(
						Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]),
						bc.body(blocks[i]).unwrap()
					);
				}
			}
		}
	}
4232
	#[test]
	fn prune_blocks_on_finalize_with_fork() {
		sp_tracing::try_init_simple();

		// Bodies on a displaced fork must be removed under `Some(n)` and
		// `KeepFinalized`, but retained under `KeepAll`.
		let pruning_modes =
			vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll];

		for pruning in pruning_modes {
			let backend = Backend::<Block>::new_test_with_tx_storage(pruning, 10);
			// Canonical chain: 5 blocks, one extrinsic each.
			let mut blocks = Vec::new();
			let mut prev_hash = Default::default();
			for i in 0..5 {
				let hash = insert_block(
					&backend,
					i,
					prev_hash,
					None,
					Default::default(),
					vec![UncheckedXt::new_transaction(i.into(), ())],
					None,
				)
				.unwrap();
				blocks.push(hash);
				prev_hash = hash;
			}

			// insert a fork at block 2
			let fork_hash_root = insert_block(
				&backend,
				2,
				blocks[1],
				None,
				H256::random(),
				vec![UncheckedXt::new_transaction(2.into(), ())],
				None,
			)
			.unwrap();
			insert_block(
				&backend,
				3,
				fork_hash_root,
				None,
				H256::random(),
				vec![
					UncheckedXt::new_transaction(3.into(), ()),
					UncheckedXt::new_transaction(11.into(), ()),
				],
				None,
			)
			.unwrap();
			// Mark the canonical tip as best.
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_head(blocks[4]).unwrap();
			backend.commit_operation(op).unwrap();

			let bc = backend.blockchain();
			// Before finalization the fork body is still available.
			assert_eq!(
				Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
				bc.body(fork_hash_root).unwrap()
			);

			// Finalize the canonical chain block by block.
			for i in 1..5 {
				let mut op = backend.begin_operation().unwrap();
				backend.begin_state_operation(&mut op, blocks[4]).unwrap();
				op.mark_finalized(blocks[i], None).unwrap();
				backend.commit_operation(op).unwrap();
			}

			if matches!(pruning, BlocksPruning::Some(_)) {
				// Window of 2: only the bodies of #3 and #4 remain.
				assert_eq!(None, bc.body(blocks[0]).unwrap());
				assert_eq!(None, bc.body(blocks[1]).unwrap());
				assert_eq!(None, bc.body(blocks[2]).unwrap());

				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
					bc.body(blocks[3]).unwrap()
				);
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
					bc.body(blocks[4]).unwrap()
				);
			} else {
				// Keep modes retain every canonical body.
				for i in 0..5 {
					assert_eq!(
						Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]),
						bc.body(blocks[i]).unwrap()
					);
				}
			}

			// The displaced fork's body survives only under `KeepAll`.
			if matches!(pruning, BlocksPruning::KeepAll) {
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
					bc.body(fork_hash_root).unwrap()
				);
			} else {
				assert_eq!(None, bc.body(fork_hash_root).unwrap());
			}

			// Canonical head and number-to-hash mappings are unaffected by
			// body pruning.
			assert_eq!(bc.info().best_number, 4);
			for i in 0..5 {
				assert!(bc.hash(i).unwrap().is_some());
			}
		}
	}
4338
	#[test]
	fn prune_blocks_on_finalize_and_reorg() {
		// After reorging away from 1b and finalizing the `a` chain, the
		// displaced bodies (1b, 2b) must be pruned while the canonical bodies
		// stay within the pruning window of 10.
		// 	0 - 1b
		// 	\ - 1a - 2a - 3a
		// 	     \ - 2b

		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);

		// Helper: insert a block carrying a single, value-tagged extrinsic so
		// bodies can be told apart in the assertions below.
		let make_block = |index, parent, val: u64| {
			insert_block(
				&backend,
				index,
				parent,
				None,
				H256::random(),
				vec![UncheckedXt::new_transaction(val.into(), ())],
				None,
			)
			.unwrap()
		};

		let block_0 = make_block(0, Default::default(), 0x00);
		let block_1a = make_block(1, block_0, 0x1a);
		let block_1b = make_block(1, block_0, 0x1b);
		let block_2a = make_block(2, block_1a, 0x2a);
		let block_2b = make_block(2, block_1a, 0x2b);
		let block_3a = make_block(3, block_2a, 0x3a);

		// Make sure 1b is head
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, block_0).unwrap();
		op.mark_head(block_1b).unwrap();
		backend.commit_operation(op).unwrap();

		// Finalize 3a
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, block_0).unwrap();
		op.mark_head(block_3a).unwrap();
		op.mark_finalized(block_1a, None).unwrap();
		op.mark_finalized(block_2a, None).unwrap();
		op.mark_finalized(block_3a, None).unwrap();
		backend.commit_operation(op).unwrap();

		let bc = backend.blockchain();
		// Displaced fork bodies are gone.
		assert_eq!(None, bc.body(block_1b).unwrap());
		assert_eq!(None, bc.body(block_2b).unwrap());
		// Canonical bodies remain available.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x00.into(), ())]),
			bc.body(block_0).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x1a.into(), ())]),
			bc.body(block_1a).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x2a.into(), ())]),
			bc.body(block_2a).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x3a.into(), ())]),
			bc.body(block_3a).unwrap()
		);
	}
4402
	#[test]
	fn indexed_data_block_body() {
		// Indexed transaction data must be retrievable after import and be
		// removed once the containing block's body is pruned.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);

		let x0 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x1 = UncheckedXt::new_transaction(1.into(), ()).encode();
		// Hash/index only the payload past the first byte of the encoding.
		let x0_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x0[1..]);
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
		let index = vec![
			IndexOperation::Insert {
				extrinsic: 0,
				hash: x0_hash.as_ref().to_vec(),
				size: (x0.len() - 1) as u32,
			},
			IndexOperation::Insert {
				extrinsic: 1,
				hash: x1_hash.as_ref().to_vec(),
				size: (x1.len() - 1) as u32,
			},
		];
		let hash = insert_block(
			&backend,
			0,
			Default::default(),
			None,
			Default::default(),
			vec![
				UncheckedXt::new_transaction(0.into(), ()),
				UncheckedXt::new_transaction(1.into(), ()),
			],
			Some(index),
		)
		.unwrap();
		let bc = backend.blockchain();
		// Both indexed payloads are immediately retrievable by hash.
		assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[1..]);
		assert_eq!(bc.indexed_transaction(x1_hash).unwrap().unwrap(), &x1[1..]);

		let hashof0 = bc.info().genesis_hash;
		// Push one more block and make sure block is pruned and transaction index is cleared.
		let block1 =
			insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap();
		backend.finalize_block(block1, None).unwrap();
		assert_eq!(bc.body(hashof0).unwrap(), None);
		assert_eq!(bc.indexed_transaction(x0_hash).unwrap(), None);
		assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None);
	}
4449
	#[test]
	fn index_invalid_size() {
		// Index entries with an invalid declared size must be skipped: a size
		// equal to the full encoding is accepted, a size exceeding the
		// extrinsic length is rejected (per the assertions below).
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);

		let x0 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x1 = UncheckedXt::new_transaction(1.into(), ()).encode();

		// Hashes are computed over the full encodings here.
		let x0_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x0[..]);
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[..]);
		let index = vec![
			// Valid: size matches the full encoding.
			IndexOperation::Insert {
				extrinsic: 0,
				hash: x0_hash.as_ref().to_vec(),
				size: (x0.len()) as u32,
			},
			// Invalid: declared size is one byte larger than the extrinsic.
			IndexOperation::Insert {
				extrinsic: 1,
				hash: x1_hash.as_ref().to_vec(),
				size: (x1.len() + 1) as u32,
			},
		];
		insert_block(
			&backend,
			0,
			Default::default(),
			None,
			Default::default(),
			vec![
				UncheckedXt::new_transaction(0.into(), ()),
				UncheckedXt::new_transaction(1.into(), ()),
			],
			Some(index),
		)
		.unwrap();
		let bc = backend.blockchain();
		assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[..]);
		assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None);
	}
4488
	#[test]
	fn renew_transaction_storage() {
		// Renewing an indexed transaction in later blocks must keep its data
		// alive past the pruning of the block that originally inserted it.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();
		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
		// Index only the payload past the first byte of the encoding.
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
		for i in 0..10 {
			let mut index = Vec::new();
			if i == 0 {
				// Block #0 inserts the indexed payload.
				index.push(IndexOperation::Insert {
					extrinsic: 0,
					hash: x1_hash.as_ref().to_vec(),
					size: (x1.len() - 1) as u32,
				});
			} else if i < 5 {
				// keep renewing 1st
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
			} // else stop renewing
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				Some(index),
			)
			.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		// Finalize block by block: with a pruning window of 2 and the last
		// renewal in block #4, the payload must remain retrievable while
		// finalizing up to block #5 and be gone from block #6 on.
		for i in 1..10 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
			let bc = backend.blockchain();
			if i < 6 {
				assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());
			} else {
				assert!(bc.indexed_transaction(x1_hash).unwrap().is_none());
			}
		}
	}
4535
	#[test]
	fn multi_renew_transaction_storage() {
		// Test that multiple renewals within a single extrinsic work correctly
		// and that data survives across the renewal window.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Two distinct data items
		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x2 = UncheckedXt::new_transaction(1.into(), ()).encode();
		// Index only the payload past the first byte of each encoding.
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
		let x2_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x2[1..]);

		for i in 0..10 {
			let mut index = Vec::new();
			if i == 0 {
				// Block 0: Insert both items as separate extrinsics
				index.push(IndexOperation::Insert {
					extrinsic: 0,
					hash: x1_hash.as_ref().to_vec(),
					size: (x1.len() - 1) as u32,
				});
				index.push(IndexOperation::Insert {
					extrinsic: 1,
					hash: x2_hash.as_ref().to_vec(),
					size: (x2.len() - 1) as u32,
				});
			} else if i < 5 {
				// Blocks 1-4: Renew BOTH items in a single extrinsic (multi-renew)
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x2_hash.as_ref().to_vec() });
			}
			// Blocks 5+: stop renewing

			let body = if i == 0 {
				vec![
					UncheckedXt::new_transaction(0.into(), ()),
					UncheckedXt::new_transaction(1.into(), ()),
				]
			} else {
				vec![UncheckedXt::new_transaction(i.into(), ())]
			};
			let hash =
				insert_block(&backend, i, prev_hash, None, Default::default(), body, Some(index))
					.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		// Finalize progressively and check that both items survive while renewed
		// (window of 2, last renewal at block #4: both payloads must exist up
		// to finalization of block #5 and be pruned from block #6 on).
		for i in 1..10 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
			let bc = backend.blockchain();
			if i < 6 {
				assert!(
					bc.indexed_transaction(x1_hash).unwrap().is_some(),
					"x1 should exist at finalization step {i}"
				);
				assert!(
					bc.indexed_transaction(x2_hash).unwrap().is_some(),
					"x2 should exist at finalization step {i}"
				);
			} else {
				assert!(
					bc.indexed_transaction(x1_hash).unwrap().is_none(),
					"x1 should be pruned at finalization step {i}"
				);
				assert!(
					bc.indexed_transaction(x2_hash).unwrap().is_none(),
					"x2 should be pruned at finalization step {i}"
				);
			}
		}
	}
4614
	#[test]
	fn multi_renew_block_indexed_body() {
		// Test that block_indexed_body returns data for all hashes in a MultiRenew extrinsic.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);

		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x2 = UncheckedXt::new_transaction(1.into(), ()).encode();
		// Index only the payload past the first byte of each encoding.
		let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
		let x2_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x2[1..]);

		// Block 0: Insert both items
		let block0 = insert_block(
			&backend,
			0,
			Default::default(),
			None,
			Default::default(),
			vec![
				UncheckedXt::new_transaction(0.into(), ()),
				UncheckedXt::new_transaction(1.into(), ()),
			],
			Some(vec![
				IndexOperation::Insert {
					extrinsic: 0,
					hash: x1_hash.as_ref().to_vec(),
					size: (x1.len() - 1) as u32,
				},
				IndexOperation::Insert {
					extrinsic: 1,
					hash: x2_hash.as_ref().to_vec(),
					size: (x2.len() - 1) as u32,
				},
			]),
		)
		.unwrap();

		// Block 1: Multi-renew both in a single extrinsic
		let block1 = insert_block(
			&backend,
			1,
			block0,
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(10.into(), ())],
			Some(vec![
				IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() },
				IndexOperation::Renew { extrinsic: 0, hash: x2_hash.as_ref().to_vec() },
			]),
		)
		.unwrap();

		// The renewing block reports both payloads, in renewal order.
		let bc = backend.blockchain();
		let indexed_body = bc.block_indexed_body(block1).unwrap().unwrap();
		assert_eq!(indexed_body.len(), 2, "Should have 2 indexed data blobs");
		assert_eq!(&indexed_body[0][..], &x1[1..]);
		assert_eq!(&indexed_body[1][..], &x2[1..]);
	}
4672
#[test]
fn multi_renew_prune_releases_all() {
	// Pruning a block that carries a MultiRenew entry must release every ref count
	// that entry contributed. With BlocksPruning::Some(2) we build enough blocks so
	// that both the inserting block and the multi-renewing block fall out of the
	// retention window.
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);

	let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
	let x2 = UncheckedXt::new_transaction(1.into(), ()).encode();
	let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
	let x2_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x2[1..]);

	let mut chain = Vec::new();
	let mut parent = Default::default();
	for n in 0..6 {
		let (body, ops) = match n {
			// Block 0: store both blobs via Insert ops.
			0 => (
				vec![
					UncheckedXt::new_transaction(0.into(), ()),
					UncheckedXt::new_transaction(1.into(), ()),
				],
				vec![
					IndexOperation::Insert {
						extrinsic: 0,
						hash: x1_hash.as_ref().to_vec(),
						size: (x1.len() - 1) as u32,
					},
					IndexOperation::Insert {
						extrinsic: 1,
						hash: x2_hash.as_ref().to_vec(),
						size: (x2.len() - 1) as u32,
					},
				],
			),
			// Block 1: renew both blobs from a single extrinsic slot.
			1 => (
				vec![UncheckedXt::new_transaction(10.into(), ())],
				vec![
					IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() },
					IndexOperation::Renew { extrinsic: 0, hash: x2_hash.as_ref().to_vec() },
				],
			),
			// Blocks 2+: filler blocks that just advance the chain.
			_ => (vec![UncheckedXt::new_transaction(n.into(), ())], Vec::new()),
		};
		let hash = insert_block(&backend, n, parent, None, Default::default(), body, Some(ops))
			.unwrap();
		chain.push(hash);
		parent = hash;
	}

	let bc = backend.blockchain();
	// Nothing finalized yet, so both blobs must still be reachable.
	assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());
	assert!(bc.indexed_transaction(x2_hash).unwrap().is_some());

	// Finalize one block at a time so pruning advances as it would in production.
	for n in 1..6 {
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, chain[4]).unwrap();
		op.mark_finalized(chain[n], None).unwrap();
		backend.commit_operation(op).unwrap();
	}

	// After finalizing block 5 with pruning=2, blocks 0-3 are pruned, which releases
	// both the insert ref (block 0) and the multi-renew ref (block 1) of each blob.
	assert!(
		bc.indexed_transaction(x1_hash).unwrap().is_none(),
		"x1 should be gone after all referring blocks are pruned"
	);
	assert!(
		bc.indexed_transaction(x2_hash).unwrap().is_none(),
		"x2 should be gone after all referring blocks are pruned"
	);
}
4745
#[test]
fn multi_renew_body_reconstruction() {
	// `body()` must be able to rebuild the extrinsic list of a block whose
	// BODY_INDEX entry is a MultiRenew.
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);

	let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
	let x2 = UncheckedXt::new_transaction(1.into(), ()).encode();
	let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
	let x2_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x2[1..]);

	// Block 0 stores both blobs via Insert ops.
	let insert_ops = vec![
		IndexOperation::Insert {
			extrinsic: 0,
			hash: x1_hash.as_ref().to_vec(),
			size: (x1.len() - 1) as u32,
		},
		IndexOperation::Insert {
			extrinsic: 1,
			hash: x2_hash.as_ref().to_vec(),
			size: (x2.len() - 1) as u32,
		},
	];
	let block0 = insert_block(
		&backend,
		0,
		Default::default(),
		None,
		Default::default(),
		vec![
			UncheckedXt::new_transaction(0.into(), ()),
			UncheckedXt::new_transaction(1.into(), ()),
		],
		Some(insert_ops),
	)
	.unwrap();

	// Block 1 renews both blobs from one extrinsic slot, producing a MultiRenew entry.
	let renew_xt = UncheckedXt::new_transaction(10.into(), ());
	let renew_ops = vec![
		IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() },
		IndexOperation::Renew { extrinsic: 0, hash: x2_hash.as_ref().to_vec() },
	];
	let block1 = insert_block(
		&backend,
		1,
		block0,
		None,
		Default::default(),
		vec![renew_xt.clone()],
		Some(renew_ops),
	)
	.unwrap();

	// Reconstructing the body of block 1 must yield exactly the original extrinsic.
	let body = backend.blockchain().body(block1).unwrap().unwrap();
	assert_eq!(body.len(), 1, "Block 1 has one extrinsic");
	assert_eq!(body[0], renew_xt, "Extrinsic should be reconstructed correctly");
}
4804
#[test]
fn single_renew_backwards_compatible() {
	// A single Renew per extrinsic slot must still be stored as `DbExtrinsic::Indexed`,
	// keeping the on-disk format backwards compatible.
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);

	let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
	let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);

	// Block 0 stores the blob.
	let genesis = insert_block(
		&backend,
		0,
		Default::default(),
		None,
		Default::default(),
		vec![UncheckedXt::new_transaction(0.into(), ())],
		Some(vec![IndexOperation::Insert {
			extrinsic: 0,
			hash: x1_hash.as_ref().to_vec(),
			size: (x1.len() - 1) as u32,
		}]),
	)
	.unwrap();

	// Block 1 renews it exactly once (should produce Indexed, not MultiRenew).
	let renew_block = insert_block(
		&backend,
		1,
		genesis,
		None,
		Default::default(),
		vec![UncheckedXt::new_transaction(1.into(), ())],
		Some(vec![IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() }]),
	)
	.unwrap();

	let bc = backend.blockchain();

	// The indexed data stays accessible.
	assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());

	// The body reconstructs, confirming the Indexed variant round-trips.
	let body = bc.body(renew_block).unwrap().unwrap();
	assert_eq!(body.len(), 1);
	assert_eq!(body[0], UncheckedXt::new_transaction(1.into(), ()));

	// block_indexed_body exposes the renewed data.
	let indexed = bc.block_indexed_body(renew_block).unwrap().unwrap();
	assert_eq!(indexed.len(), 1);
	assert_eq!(&indexed[0][..], &x1[1..]);
}
4858
#[test]
fn multi_renew_duplicate_hash_balanced_lifecycle() {
	// A block emitting `[Renew{0, X}, Renew{0, X}]` for the same hash twice gets
	// dedup'd by `renewed_map`'s BTreeSet to a single entry, so the slot is stored
	// as `Indexed { hash: X, .. }` (not MultiRenew) and contributes +1 ref. The
	// block must release that +1 cleanly when it is pruned. Asymmetric +/- (e.g.
	// if dedup was skipped on the +1 side but kept on -1, or vice versa) would
	// either leak X forever or panic at parity-db level.
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
	let mut blocks = Vec::new();
	let mut prev_hash = Default::default();

	// Payload under test; the indexed blob is the encoding minus its first byte,
	// matching the `size: len - 1` used in the Insert op below.
	let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
	let x1_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x1[1..]);

	// Variant-shape assertion: this is the realistic shape produced by
	// `utility.batch(renew[X], renew[X], ...)`. Confirm directly that N >= 2 Renews
	// of the same hash collapse to `Indexed` (not `MultiRenew`) after BTreeSet dedup,
	// independent of the lifecycle test below. Tested at N = 3 to make the dedup
	// non-trivial.
	{
		let mut tx: Transaction<DbHash> = Transaction::new();
		let dummy_body = vec![UncheckedXt::new_transaction(10.into(), ())];
		let dup_ops = vec![
			IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() },
			IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() },
			IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() },
		];
		let encoded = apply_index_ops::<Block>(&mut tx, dummy_body, dup_ops);
		let decoded: Vec<DbExtrinsic<Block>> =
			Decode::decode(&mut &encoded[..]).expect("apply_index_ops output must decode");
		assert_eq!(decoded.len(), 1);
		assert!(
			matches!(decoded[0], DbExtrinsic::Indexed { .. }),
			"N duplicate Renews of the same hash must dedup to Indexed (not MultiRenew); \
			 got {:?}",
			decoded[0],
		);
	}

	for i in 0..6 {
		let mut index = Vec::new();
		let body = if i == 0 {
			// Block 0: store X via a plain Insert (+1 ref).
			index.push(IndexOperation::Insert {
				extrinsic: 0,
				hash: x1_hash.as_ref().to_vec(),
				size: (x1.len() - 1) as u32,
			});
			vec![UncheckedXt::new_transaction(0.into(), ())]
		} else if i == 1 {
			// Duplicate Renew at the same extrinsic index — the scenario this test exists for.
			index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
			index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
			vec![UncheckedXt::new_transaction(10.into(), ())]
		} else {
			// Filler blocks that just advance the chain past the pruning window.
			vec![UncheckedXt::new_transaction(i.into(), ())]
		};
		let hash =
			insert_block(&backend, i, prev_hash, None, Default::default(), body, Some(index))
				.unwrap();
		blocks.push(hash);
		prev_hash = hash;
	}

	let bc = backend.blockchain();
	// Pre-finalization: X is alive (refcount = 1 from insert + 1 from dedup'd renew = 2)
	assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());

	// Finalize step-by-step. With BlocksPruning::Some(2), after finalizing block N
	// blocks 0..(N-2) are pruned (last 2 finalized blocks are kept).
	for i in 1..6 {
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
		op.mark_finalized(blocks[i], None).unwrap();
		backend.commit_operation(op).unwrap();
	}

	// After all finalization, blocks 0 and 1 are both pruned. If dedup was applied
	// asymmetrically (e.g. on the +1 side but not the -1 side), X would either leak
	// or over-release; the assertion below catches both cases.
	assert!(
		bc.indexed_transaction(x1_hash).unwrap().is_none(),
		"X should be deleted: insert (+1) + dedup'd renew block (+1) all pruned",
	);
}
4944
#[test]
fn multi_renew_mixed_duplicates_and_uniques() {
	// `[Renew{0, W}, Renew{0, X}, Renew{0, Y}, Renew{0, W}, Renew{0, Z}]` — the realistic
	// shape of a `utility.batch(renew[..])` call where one content_hash appears twice
	// alongside several uniques. 5 ops, 4 distinct hashes, 1 duplicate.
	// Locks in three things at once:
	// (1) Stored hashes are deduplicated (the duplicate W collapses to a single entry).
	// (2) block_indexed_body returns one blob per distinct hash; lookup is by membership,
	//     not position (BTreeSet iteration order is by hash bytes, not insertion order).
	// (3) Refcount lifecycle is correct after dedup: each of W/X/Y/Z gets +1 from the
	//     multi-renew block (not +2 for W), and all are released when the block is pruned.
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
	let mut blocks = Vec::new();
	let mut prev_hash = Default::default();

	// Four distinct payloads; each indexed blob is the encoding minus its first byte,
	// matching the `size: len - 1` used in the Insert ops below.
	let w = UncheckedXt::new_transaction(0.into(), ()).encode();
	let x = UncheckedXt::new_transaction(1.into(), ()).encode();
	let y = UncheckedXt::new_transaction(2.into(), ()).encode();
	let z = UncheckedXt::new_transaction(3.into(), ()).encode();
	let w_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&w[1..]);
	let x_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x[1..]);
	let y_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&y[1..]);
	let z_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&z[1..]);

	for i in 0..6 {
		let mut index = Vec::new();
		let body = if i == 0 {
			// Block 0: insert all four payloads (+1 ref each).
			index.push(IndexOperation::Insert {
				extrinsic: 0,
				hash: w_hash.as_ref().to_vec(),
				size: (w.len() - 1) as u32,
			});
			index.push(IndexOperation::Insert {
				extrinsic: 1,
				hash: x_hash.as_ref().to_vec(),
				size: (x.len() - 1) as u32,
			});
			index.push(IndexOperation::Insert {
				extrinsic: 2,
				hash: y_hash.as_ref().to_vec(),
				size: (y.len() - 1) as u32,
			});
			index.push(IndexOperation::Insert {
				extrinsic: 3,
				hash: z_hash.as_ref().to_vec(),
				size: (z.len() - 1) as u32,
			});
			vec![
				UncheckedXt::new_transaction(0.into(), ()),
				UncheckedXt::new_transaction(1.into(), ()),
				UncheckedXt::new_transaction(2.into(), ()),
				UncheckedXt::new_transaction(3.into(), ()),
			]
		} else if i == 1 {
			// 5 ops: W appears twice (positions 0 and 3), X/Y/Z once each.
			index.push(IndexOperation::Renew { extrinsic: 0, hash: w_hash.as_ref().to_vec() });
			index.push(IndexOperation::Renew { extrinsic: 0, hash: x_hash.as_ref().to_vec() });
			index.push(IndexOperation::Renew { extrinsic: 0, hash: y_hash.as_ref().to_vec() });
			index.push(IndexOperation::Renew { extrinsic: 0, hash: w_hash.as_ref().to_vec() });
			index.push(IndexOperation::Renew { extrinsic: 0, hash: z_hash.as_ref().to_vec() });
			vec![UncheckedXt::new_transaction(10.into(), ())]
		} else {
			// Filler blocks that just advance the chain past the pruning window.
			vec![UncheckedXt::new_transaction(i.into(), ())]
		};
		let hash =
			insert_block(&backend, i, prev_hash, None, Default::default(), body, Some(index))
				.unwrap();
		blocks.push(hash);
		prev_hash = hash;
	}

	let bc = backend.blockchain();

	// (1) and (2): block_indexed_body returns one blob per distinct hash, regardless
	// of how many times that hash appeared in the input ops. Order is by hash bytes
	// (BTreeSet iteration), so we check membership rather than positions.
	let indexed_body = bc.block_indexed_body(blocks[1]).unwrap().unwrap();
	assert_eq!(indexed_body.len(), 4, "duplicate W collapsed; 4 distinct blobs remain");
	let blobs: BTreeSet<&[u8]> = indexed_body.iter().map(|b| b.as_slice()).collect();
	assert!(blobs.contains(&&w[1..]), "W blob must be present");
	assert!(blobs.contains(&&x[1..]), "X blob must be present");
	assert!(blobs.contains(&&y[1..]), "Y blob must be present");
	assert!(blobs.contains(&&z[1..]), "Z blob must be present");

	// (3) Lifecycle: finalize through pruning.
	for i in 1..6 {
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
		op.mark_finalized(blocks[i], None).unwrap();
		backend.commit_operation(op).unwrap();
	}

	// After all blocks pruned: refcounts zero out. With BTreeSet dedup the multi-renew
	// block contributed exactly +1 per distinct hash, so each is released cleanly when
	// that block is pruned.
	// W: insert(+1) + multi-renew(+1) = 2, all pruned = -2 ✓
	// X: insert(+1) + multi-renew(+1) = 2, all pruned = -2 ✓
	// Y: insert(+1) + multi-renew(+1) = 2, all pruned = -2 ✓
	// Z: insert(+1) + multi-renew(+1) = 2, all pruned = -2 ✓
	assert!(bc.indexed_transaction(w_hash).unwrap().is_none(), "W deleted");
	assert!(bc.indexed_transaction(x_hash).unwrap().is_none(), "X deleted");
	assert!(bc.indexed_transaction(y_hash).unwrap().is_none(), "Y deleted");
	assert!(bc.indexed_transaction(z_hash).unwrap().is_none(), "Z deleted");
}
5049
#[test]
fn insert_and_renew_same_index_renew_wins() {
	// Documents the pre-existing precedence in apply_index_ops: when an Insert and
	// a Renew both target the same extrinsic_index, the Renew wins and the Insert
	// is silently dropped — its data write to the TRANSACTION column never happens.
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);

	let x = UncheckedXt::new_transaction(0.into(), ()).encode();
	let y = UncheckedXt::new_transaction(1.into(), ()).encode();
	let x_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x[1..]);
	let y_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&y[1..]);

	// Block 0 stores X via a plain Insert — X now exists in the database.
	let block0 = insert_block(
		&backend,
		0,
		Default::default(),
		None,
		Default::default(),
		vec![UncheckedXt::new_transaction(0.into(), ())],
		Some(vec![IndexOperation::Insert {
			extrinsic: 0,
			hash: x_hash.as_ref().to_vec(),
			size: (x.len() - 1) as u32,
		}]),
	)
	.unwrap();

	// Block 1 carries BOTH Insert{0, Y, ...} and Renew{0, X} for the same extrinsic;
	// per apply_index_ops precedence the Renew wins and Insert{Y} is discarded.
	let conflicting_ops = vec![
		IndexOperation::Insert {
			extrinsic: 0,
			hash: y_hash.as_ref().to_vec(),
			size: (y.len() - 1) as u32,
		},
		IndexOperation::Renew { extrinsic: 0, hash: x_hash.as_ref().to_vec() },
	];
	let block1 = insert_block(
		&backend,
		1,
		block0,
		None,
		Default::default(),
		vec![UncheckedXt::new_transaction(99.into(), ())],
		Some(conflicting_ops),
	)
	.unwrap();

	let bc = backend.blockchain();

	// X exists from block 0's Insert; block 1's Renew bumped its refcount.
	assert!(
		bc.indexed_transaction(x_hash).unwrap().is_some(),
		"X stored at block 0; renewed at block 1",
	);

	// Y was never written — its Insert lost to the Renew on the same slot.
	assert!(
		bc.indexed_transaction(y_hash).unwrap().is_none(),
		"Y must NOT be stored — its Insert was silently discarded by Renew precedence",
	);

	// Block 1's BODY_INDEX entry references X only (single hash from the Renew),
	// so block_indexed_body yields the data stored under X's hash.
	let indexed = bc.block_indexed_body(block1).unwrap().unwrap();
	assert_eq!(indexed.len(), 1, "single Indexed entry from the Renew");
	assert_eq!(&indexed[0][..], &x[1..], "data is X (the renewed hash)");
}
5119
#[test]
fn db_extrinsic_encoding_round_trip() {
	// SCALE round-trip stability for all three DbExtrinsic variants. Guards against
	// future variant reordering or field reshaping that would corrupt on-disk
	// BODY_INDEX entries. DbExtrinsic doesn't derive PartialEq, so the decoded
	// values are re-encoded and the bytes compared instead.
	let indexed = DbExtrinsic::Indexed {
		hash: H256::repeat_byte(0xAA),
		header: vec![0x01, 0x02, 0x03],
	};
	let full = DbExtrinsic::Full(UncheckedXt::new_transaction(42.into(), ()));
	let multi = DbExtrinsic::MultiRenew {
		hashes: BTreeSet::from([H256::repeat_byte(0xBB), H256::repeat_byte(0xCC)]),
		extrinsic: vec![0x04, 0x05, 0x06, 0x07],
	};
	let entries: Vec<DbExtrinsic<Block>> = vec![indexed, full, multi];

	let encoded = entries.encode();
	let decoded: Vec<DbExtrinsic<Block>> =
		Decode::decode(&mut &encoded[..]).expect("encoded DbExtrinsic vec must decode");
	assert_eq!(encoded, decoded.encode(), "encode -> decode -> encode must be byte-stable");

	// Structural sanity checks on each decoded entry.
	assert_eq!(decoded.len(), 3);
	assert!(matches!(decoded[0], DbExtrinsic::Indexed { .. }));
	assert!(matches!(decoded[1], DbExtrinsic::Full(_)));
	match &decoded[2] {
		DbExtrinsic::MultiRenew { hashes, extrinsic } => {
			assert_eq!(hashes.len(), 2);
			assert!(hashes.contains(&H256::repeat_byte(0xBB)));
			assert!(hashes.contains(&H256::repeat_byte(0xCC)));
			assert_eq!(extrinsic, &vec![0x04, 0x05, 0x06, 0x07]);
		},
		other => panic!("expected MultiRenew, got {other:?}"),
	}
}
5154
#[test]
fn apply_index_ops_deterministic() {
	// `renewed_map` uses BTreeSet<DbHash>, so the bytes `apply_index_ops` encodes
	// are deterministic across nodes (sorted by hash, dedup'd). The BODY_INDEX
	// entry is part of the on-disk representation that must match across replicas;
	// pin the determinism with a test instead of relying on convention.
	let body = vec![
		UncheckedXt::new_transaction(0.into(), ()),
		UncheckedXt::new_transaction(1.into(), ()),
	];
	let h1 = H256::repeat_byte(0x11).as_ref().to_vec();
	let h2 = H256::repeat_byte(0x22).as_ref().to_vec();
	let h3 = H256::repeat_byte(0x33).as_ref().to_vec();

	let ops = vec![
		IndexOperation::Renew { extrinsic: 0, hash: h1.clone() },
		IndexOperation::Renew { extrinsic: 0, hash: h2 },
		IndexOperation::Renew { extrinsic: 0, hash: h1 }, // duplicate of the first op
		IndexOperation::Renew { extrinsic: 1, hash: h3 },
	];

	// Run the identical input through apply_index_ops twice and compare raw bytes.
	let mut first_tx: Transaction<DbHash> = Transaction::new();
	let first = apply_index_ops::<Block>(&mut first_tx, body.clone(), ops.clone());
	let mut second_tx: Transaction<DbHash> = Transaction::new();
	let second = apply_index_ops::<Block>(&mut second_tx, body, ops);

	assert_eq!(first, second, "apply_index_ops must be deterministic across calls");

	// Sanity: slot 0 (3 renew ops incl. a duplicate) must take the MultiRenew shape
	// and slot 1 (single renew) the Indexed shape, so this test really exercises the
	// new codepath rather than the trivial Indexed/Full branches.
	let decoded: Vec<DbExtrinsic<Block>> =
		Decode::decode(&mut &first[..]).expect("apply_index_ops output must decode");
	assert_eq!(decoded.len(), 2);
	assert!(
		matches!(decoded[0], DbExtrinsic::MultiRenew { .. }),
		"index 0 (3 renew ops including duplicate) must be MultiRenew",
	);
	assert!(
		matches!(decoded[1], DbExtrinsic::Indexed { .. }),
		"index 1 (single renew op) must be Indexed",
	);
}
5198
#[test]
fn multi_renew_in_one_block_indexed_in_another() {
	// Cross-block lifecycle: same content hash X is referenced by three blocks
	// via two different DbExtrinsic shapes (the duplicate-renew block dedups to
	// a single hash and so also takes the Indexed shape):
	//   - Block 0 (Insert):                 `Indexed { hash: X, header: partial }`  → +1 ref
	//   - Block 1 (single Renew):           `Indexed { hash: X, header: full }`     → +1 ref
	//   - Block 2 (Renew{X}, Renew{X}):     `Indexed { hash: X, header: full }`     → +1 ref
	//     (BTreeSet collapses the duplicate; len == 1, so it's not MultiRenew)
	// Total refcount peaks at 3. Each block's prune must release exactly its
	// contribution. Realistic auto-renewal pattern on Bulletin chain.
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
	let mut blocks = Vec::new();
	let mut prev_hash = Default::default();

	// Single payload X; the indexed blob is the encoding minus its first byte,
	// matching the `size: len - 1` used in the Insert op below.
	let x = UncheckedXt::new_transaction(0.into(), ()).encode();
	let x_hash = <HashingFor<Block> as sp_core::Hasher>::hash(&x[1..]);

	for i in 0..6 {
		let mut index = Vec::new();
		let body = if i == 0 {
			// Insert path: Indexed { hash, header: partial }
			index.push(IndexOperation::Insert {
				extrinsic: 0,
				hash: x_hash.as_ref().to_vec(),
				size: (x.len() - 1) as u32,
			});
			vec![UncheckedXt::new_transaction(0.into(), ())]
		} else if i == 1 {
			// Single Renew path: Indexed { hash, header: full_encoded }
			index.push(IndexOperation::Renew { extrinsic: 0, hash: x_hash.as_ref().to_vec() });
			vec![UncheckedXt::new_transaction(10.into(), ())]
		} else if i == 2 {
			// Two Renew ops with the same hash; BTreeSet dedups to one, so this
			// produces Indexed { hash: X, header: full } (not MultiRenew).
			index.push(IndexOperation::Renew { extrinsic: 0, hash: x_hash.as_ref().to_vec() });
			index.push(IndexOperation::Renew { extrinsic: 0, hash: x_hash.as_ref().to_vec() });
			vec![UncheckedXt::new_transaction(20.into(), ())]
		} else {
			// Empty filler blocks
			vec![UncheckedXt::new_transaction(i.into(), ())]
		};
		let hash =
			insert_block(&backend, i, prev_hash, None, Default::default(), body, Some(index))
				.unwrap();
		blocks.push(hash);
		prev_hash = hash;
	}

	let bc = backend.blockchain();
	// Pre-finalization: refcount = 1 + 1 + 1 = 3 (block 2's duplicate dedup'd). X is alive.
	assert!(bc.indexed_transaction(x_hash).unwrap().is_some());

	// Finalize step-by-step. With BlocksPruning::Some(2), after finalizing block N,
	// blocks 0..(N-2) are pruned (last 2 finalized blocks are kept).
	for i in 1..6 {
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
		op.mark_finalized(blocks[i], None).unwrap();
		backend.commit_operation(op).unwrap();
	}

	// After finalize 5: blocks 0, 1, 2, 3 all pruned.
	// Each block's prune released its own contribution:
	//   block 0 (Indexed insert):                  -1
	//   block 1 (Indexed single-renew):            -1
	//   block 2 (Indexed dedup'd duplicate-renew): -1
	// Total releases = 3. Refcount = 3 - 3 = 0. X should be deleted.
	assert!(
		bc.indexed_transaction(x_hash).unwrap().is_none(),
		"X must be deleted: insert + single-renew + dedup'd-renew all pruned",
	);
}
5272
#[test]
fn remove_leaf_block_works() {
	// Builds this tree (numbers are heights) and then removes leaves one by one,
	// checking the leaf/children bookkeeping after each removal:
	//   blocks[0] - blocks[1] - blocks[2]
	//            \            \ blocks[3]
	//             \ best_hash (fork at height 1, becomes best)
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
	let mut blocks = Vec::new();
	let mut prev_hash = Default::default();
	// Canonical chain: blocks[0] at height 0, blocks[1] at height 1.
	for i in 0..2 {
		let hash = insert_block(
			&backend,
			i,
			prev_hash,
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(i.into(), ())],
			None,
		)
		.unwrap();
		blocks.push(hash);
		prev_hash = hash;
	}

	// Two sibling children of blocks[1] at height 2 (blocks[2] and blocks[3]);
	// random extrinsics roots keep their hashes distinct.
	for i in 0..2 {
		let hash = insert_block(
			&backend,
			2,
			blocks[1],
			None,
			sp_core::H256::random(),
			vec![UncheckedXt::new_transaction(i.into(), ())],
			None,
		)
		.unwrap();
		blocks.push(hash);
	}

	// insert a fork at block 1, which becomes best block
	let best_hash = insert_block(
		&backend,
		1,
		blocks[0],
		None,
		sp_core::H256::random(),
		vec![UncheckedXt::new_transaction(42.into(), ())],
		None,
	)
	.unwrap();

	assert_eq!(backend.blockchain().info().best_hash, best_hash);
	// The current best block must be refused for removal.
	assert!(backend.remove_leaf_block(best_hash).is_err());

	assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]);
	assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]);

	// Remove leaf blocks[3]: its state and header go away; sibling blocks[2] remains.
	assert!(backend.have_state_at(blocks[3], 2));
	assert!(backend.blockchain().header(blocks[3]).unwrap().is_some());
	backend.remove_leaf_block(blocks[3]).unwrap();
	assert!(!backend.have_state_at(blocks[3], 2));
	assert!(backend.blockchain().header(blocks[3]).unwrap().is_none());
	assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]);
	assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]);

	// Remove leaf blocks[2]: blocks[1] loses its last child and becomes a leaf itself.
	assert!(backend.have_state_at(blocks[2], 2));
	assert!(backend.blockchain().header(blocks[2]).unwrap().is_some());
	backend.remove_leaf_block(blocks[2]).unwrap();
	assert!(!backend.have_state_at(blocks[2], 2));
	assert!(backend.blockchain().header(blocks[2]).unwrap().is_none());
	assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]);
	assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]);

	// Remove leaf blocks[1]: only the fork (best_hash) remains as a leaf of blocks[0].
	assert!(backend.have_state_at(blocks[1], 1));
	assert!(backend.blockchain().header(blocks[1]).unwrap().is_some());
	backend.remove_leaf_block(blocks[1]).unwrap();
	assert!(!backend.have_state_at(blocks[1], 1));
	assert!(backend.blockchain().header(blocks[1]).unwrap().is_none());
	assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]);
	assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]);
}
5349
#[test]
fn test_import_existing_block_as_new_head() {
	let backend: Backend<Block> = Backend::new_test(10, 3);
	// Build a straight chain of six headers; the last import becomes best.
	let mut hashes = Vec::new();
	let mut parent = Default::default();
	for n in 0..=5 {
		parent = insert_header(&backend, n, parent, None, Default::default());
		hashes.push(parent);
	}
	assert_eq!(backend.blockchain().info().best_hash, hashes[5]);

	// Re-importing block 1 as best must fail because canonicalization_delay == 3
	// and best == 5.
	let too_old = Header {
		number: 1,
		parent_hash: hashes[0],
		state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
		digest: Default::default(),
		extrinsics_root: Default::default(),
	};
	let mut op = backend.begin_operation().unwrap();
	op.set_block_data(too_old, None, None, None, NewBlockState::Best, true).unwrap();
	assert!(matches!(backend.commit_operation(op), Err(sp_blockchain::Error::SetHeadTooOld)));

	// Re-importing block 2 as best is within the delay and succeeds.
	let header = backend.blockchain().header(hashes[2]).unwrap().unwrap();
	let mut op = backend.begin_operation().unwrap();
	op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
	backend.commit_operation(op).unwrap();
	assert_eq!(backend.blockchain().info().best_hash, hashes[2]);
}
5381
#[test]
fn test_import_existing_block_as_final() {
	let backend: Backend<Block> = Backend::new_test(10, 10);
	let genesis = insert_header(&backend, 0, Default::default(), None, Default::default());
	let block1 = insert_header(&backend, 1, genesis, None, Default::default());
	let _block2 = insert_header(&backend, 2, block1, None, Default::default());

	// Only genesis is auto-finalized on import.
	assert_eq!(backend.blockchain().info().finalized_hash, genesis);

	// Re-import block 1, this time marking it final.
	let header = backend.blockchain().header(block1).unwrap().unwrap();
	let mut op = backend.begin_operation().unwrap();
	op.set_block_data(header, None, None, None, NewBlockState::Final, true).unwrap();
	backend.commit_operation(op).unwrap();

	assert_eq!(backend.blockchain().info().finalized_hash, block1);
}
5400
#[test]
fn test_import_existing_state_fails() {
	// Importing a block whose state was already imported at the same slot must be
	// rejected by the state database.
	let backend: Backend<Block> = Backend::new_test(10, 10);
	let genesis =
		insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
			.unwrap();

	insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();
	// Second import of the same block must come back as an error.
	let err = insert_block(&backend, 1, genesis, None, Default::default(), vec![], None)
		.err()
		.unwrap();
	match err {
		sp_blockchain::Error::StateDatabase(m) if m == "Block already exists" => (),
		// Plain `e` binds every remaining case; the original `e @ _` was a
		// redundant pattern (clippy::redundant_pattern).
		e => panic!("Unexpected error {e:?}"),
	}
}
5417
5418	#[test]
5419	fn test_leaves_not_created_for_ancient_blocks() {
5420		let backend: Backend<Block> = Backend::new_test(10, 10);
5421		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
5422
5423		let block1_a = insert_header(&backend, 1, block0, None, Default::default());
5424		let block2_a = insert_header(&backend, 2, block1_a, None, Default::default());
5425		backend.finalize_block(block1_a, None).unwrap();
5426		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
5427
5428		// Insert a fork prior to finalization point. Leave should not be created.
5429		insert_header_no_head(&backend, 1, block0, [1; 32].into());
5430		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
5431	}
5432
	#[test]
	fn revert_non_best_blocks() {
		// Builds a small tree with a fork at height 3 and checks that `revert` removes
		// non-best blocks (including fork siblings) above the revert target, both from
		// the leaf set and from the database columns.
		let backend = Backend::<Block>::new_test(10, 10);

		let genesis =
			insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
				.unwrap();

		let block1 =
			insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();

		let block2 =
			insert_block(&backend, 2, block1, None, Default::default(), vec![], None).unwrap();

		// Import block 3 as a normal (non-best) block. Note that the state operation is
		// begun at `block1` while the header's parent is `block2`.
		let block3 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, block1).unwrap();
			let header = Header {
				number: 3,
				parent_hash: block2,
				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			op.set_block_data(
				header.clone(),
				Some(Vec::new()),
				None,
				None,
				NewBlockState::Normal,
				true,
			)
			.unwrap();

			backend.commit_operation(op).unwrap();

			header.hash()
		};

		// Import block 4 (non-best) on top of block 3; the state operation is begun at
		// `block2`.
		let block4 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, block2).unwrap();
			let header = Header {
				number: 4,
				parent_hash: block3,
				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			op.set_block_data(
				header.clone(),
				Some(Vec::new()),
				None,
				None,
				NewBlockState::Normal,
				true,
			)
			.unwrap();

			backend.commit_operation(op).unwrap();

			header.hash()
		};

		// Import a fork of block 3 (same number and parent, different extrinsics root so
		// the hash differs).
		let block3_fork = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, block2).unwrap();
			let header = Header {
				number: 3,
				parent_hash: block2,
				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
				digest: Default::default(),
				extrinsics_root: H256::from_low_u64_le(42),
			};

			op.set_block_data(
				header.clone(),
				Some(Vec::new()),
				None,
				None,
				NewBlockState::Normal,
				true,
			)
			.unwrap();

			backend.commit_operation(op).unwrap();

			header.hash()
		};

		// All imported blocks have state available.
		assert!(backend.have_state_at(block1, 1));
		assert!(backend.have_state_at(block2, 2));
		assert!(backend.have_state_at(block3, 3));
		assert!(backend.have_state_at(block4, 4));
		assert!(backend.have_state_at(block3_fork, 3));

		assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]);
		assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0);

		// Revert down to block 1; three blocks on the best chain are reverted.
		assert_eq!(3, backend.revert(1, false).unwrap().0);

		assert!(backend.have_state_at(block1, 1));

		// Asserts that the block is unknown to the blockchain and that neither its body
		// nor its header remain in the database columns.
		let ensure_pruned = |hash, number: u32| {
			assert_eq!(
				backend.blockchain.status(hash).unwrap(),
				sc_client_api::blockchain::BlockStatus::Unknown
			);
			assert!(
				backend
					.blockchain
					.db
					.get(columns::BODY, &number_and_hash_to_lookup_key(number, hash).unwrap())
					.is_none(),
				"{number}"
			);
			assert!(
				backend
					.blockchain
					.db
					.get(columns::HEADER, &number_and_hash_to_lookup_key(number, hash).unwrap())
					.is_none(),
				"{number}"
			);
		};

		ensure_pruned(block2, 2);
		ensure_pruned(block3, 3);
		ensure_pruned(block4, 4);
		ensure_pruned(block3_fork, 3);

		// Only block 1 remains as a leaf after the revert.
		assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]);
		assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0);
	}
5569
5570	#[test]
5571	fn revert_finalized_blocks() {
5572		let pruning_modes = [BlocksPruning::Some(10), BlocksPruning::KeepAll];
5573
5574		// we will create a chain with 11 blocks, finalize block #8 and then
5575		// attempt to revert 5 blocks.
5576		for pruning_mode in pruning_modes {
5577			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);
5578
5579			let mut parent = Default::default();
5580			for i in 0..=10 {
5581				parent = insert_block(&backend, i, parent, None, Default::default(), vec![], None)
5582					.unwrap();
5583			}
5584
5585			assert_eq!(backend.blockchain().info().best_number, 10);
5586
5587			let block8 = backend.blockchain().hash(8).unwrap().unwrap();
5588			backend.finalize_block(block8, None).unwrap();
5589			backend.revert(5, true).unwrap();
5590
5591			match pruning_mode {
5592				// we can only revert to blocks for which we have state, if pruning is enabled
5593				// then the last state available will be that of the latest finalized block
5594				BlocksPruning::Some(_) => {
5595					assert_eq!(backend.blockchain().info().finalized_number, 8)
5596				},
5597				// otherwise if we're not doing state pruning we can revert past finalized blocks
5598				_ => assert_eq!(backend.blockchain().info().finalized_number, 5),
5599			}
5600		}
5601	}
5602
5603	#[test]
5604	fn test_no_duplicated_leaves_allowed() {
5605		let backend: Backend<Block> = Backend::new_test(10, 10);
5606		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
5607		let block1 = insert_header(&backend, 1, block0, None, Default::default());
5608		// Add block 2 not as the best block
5609		let block2 = insert_header_no_head(&backend, 2, block1, Default::default());
5610		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
5611		assert_eq!(backend.blockchain().info().best_hash, block1);
5612
5613		// Add block 2 as the best block
5614		let block2 = insert_header(&backend, 2, block1, None, Default::default());
5615		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
5616		assert_eq!(backend.blockchain().info().best_hash, block2);
5617	}
5618
5619	#[test]
5620	fn force_delayed_canonicalize_waiting_for_blocks_to_be_finalized() {
5621		let pruning_modes =
5622			[BlocksPruning::Some(10), BlocksPruning::KeepAll, BlocksPruning::KeepFinalized];
5623
5624		for pruning_mode in pruning_modes {
5625			eprintln!("Running with pruning mode: {:?}", pruning_mode);
5626
5627			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);
5628
5629			let genesis = insert_block(
5630				&backend,
5631				0,
5632				Default::default(),
5633				None,
5634				Default::default(),
5635				vec![],
5636				None,
5637			)
5638			.unwrap();
5639
5640			let block1 = {
5641				let mut op = backend.begin_operation().unwrap();
5642				backend.begin_state_operation(&mut op, genesis).unwrap();
5643				let mut header = Header {
5644					number: 1,
5645					parent_hash: genesis,
5646					state_root: Default::default(),
5647					digest: Default::default(),
5648					extrinsics_root: Default::default(),
5649				};
5650
5651				let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];
5652
5653				let (root, overlay) = op.old_state.storage_root(
5654					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
5655					StateVersion::V1,
5656				);
5657				op.update_db_storage(overlay).unwrap();
5658				header.state_root = root.into();
5659
5660				op.update_storage(storage, Vec::new()).unwrap();
5661
5662				op.set_block_data(
5663					header.clone(),
5664					Some(Vec::new()),
5665					None,
5666					None,
5667					NewBlockState::Normal,
5668					true,
5669				)
5670				.unwrap();
5671
5672				backend.commit_operation(op).unwrap();
5673
5674				header.hash()
5675			};
5676
5677			if matches!(pruning_mode, BlocksPruning::Some(_)) {
5678				assert_eq!(
5679					LastCanonicalized::Block(0),
5680					backend.storage.state_db.last_canonicalized()
5681				);
5682			}
5683
5684			// This should not trigger any forced canonicalization as we didn't have imported any
5685			// best block by now.
5686			let block2 = {
5687				let mut op = backend.begin_operation().unwrap();
5688				backend.begin_state_operation(&mut op, block1).unwrap();
5689				let mut header = Header {
5690					number: 2,
5691					parent_hash: block1,
5692					state_root: Default::default(),
5693					digest: Default::default(),
5694					extrinsics_root: Default::default(),
5695				};
5696
5697				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 2]))];
5698
5699				let (root, overlay) = op.old_state.storage_root(
5700					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
5701					StateVersion::V1,
5702				);
5703				op.update_db_storage(overlay).unwrap();
5704				header.state_root = root.into();
5705
5706				op.update_storage(storage, Vec::new()).unwrap();
5707
5708				op.set_block_data(
5709					header.clone(),
5710					Some(Vec::new()),
5711					None,
5712					None,
5713					NewBlockState::Normal,
5714					true,
5715				)
5716				.unwrap();
5717
5718				backend.commit_operation(op).unwrap();
5719
5720				header.hash()
5721			};
5722
5723			if matches!(pruning_mode, BlocksPruning::Some(_)) {
5724				assert_eq!(
5725					LastCanonicalized::Block(0),
5726					backend.storage.state_db.last_canonicalized()
5727				);
5728			}
5729
5730			// This should also not trigger it yet, because we import a best block, but the best
5731			// block from the POV of the db is still at `0`.
5732			let block3 = {
5733				let mut op = backend.begin_operation().unwrap();
5734				backend.begin_state_operation(&mut op, block2).unwrap();
5735				let mut header = Header {
5736					number: 3,
5737					parent_hash: block2,
5738					state_root: Default::default(),
5739					digest: Default::default(),
5740					extrinsics_root: Default::default(),
5741				};
5742
5743				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 3]))];
5744
5745				let (root, overlay) = op.old_state.storage_root(
5746					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
5747					StateVersion::V1,
5748				);
5749				op.update_db_storage(overlay).unwrap();
5750				header.state_root = root.into();
5751
5752				op.update_storage(storage, Vec::new()).unwrap();
5753
5754				op.set_block_data(
5755					header.clone(),
5756					Some(Vec::new()),
5757					None,
5758					None,
5759					NewBlockState::Best,
5760					true,
5761				)
5762				.unwrap();
5763
5764				backend.commit_operation(op).unwrap();
5765
5766				header.hash()
5767			};
5768
5769			// Now it should kick in.
5770			let block4 = {
5771				let mut op = backend.begin_operation().unwrap();
5772				backend.begin_state_operation(&mut op, block3).unwrap();
5773				let mut header = Header {
5774					number: 4,
5775					parent_hash: block3,
5776					state_root: Default::default(),
5777					digest: Default::default(),
5778					extrinsics_root: Default::default(),
5779				};
5780
5781				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 4]))];
5782
5783				let (root, overlay) = op.old_state.storage_root(
5784					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
5785					StateVersion::V1,
5786				);
5787				op.update_db_storage(overlay).unwrap();
5788				header.state_root = root.into();
5789
5790				op.update_storage(storage, Vec::new()).unwrap();
5791
5792				op.set_block_data(
5793					header.clone(),
5794					Some(Vec::new()),
5795					None,
5796					None,
5797					NewBlockState::Best,
5798					true,
5799				)
5800				.unwrap();
5801
5802				backend.commit_operation(op).unwrap();
5803
5804				header.hash()
5805			};
5806
5807			if matches!(pruning_mode, BlocksPruning::Some(_)) {
5808				assert_eq!(
5809					LastCanonicalized::Block(2),
5810					backend.storage.state_db.last_canonicalized()
5811				);
5812			}
5813
5814			assert_eq!(block1, backend.blockchain().hash(1).unwrap().unwrap());
5815			assert_eq!(block2, backend.blockchain().hash(2).unwrap().unwrap());
5816			assert_eq!(block3, backend.blockchain().hash(3).unwrap().unwrap());
5817			assert_eq!(block4, backend.blockchain().hash(4).unwrap().unwrap());
5818		}
5819	}
5820
	#[test]
	fn test_pinned_blocks_on_finalize() {
		// Exercises the pinned-block cache: pinned blocks keep their body and
		// justifications accessible after pruning, until all pins are released.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Produces a distinct justification per block number.
		let build_justification = |i: u64| ([0, 0, 0, 0], vec![i.try_into().unwrap()]);
		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4
		for i in 0..5 {
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);
			// Avoid block pruning.
			backend.pin_block(blocks[i as usize]).unwrap();

			prev_hash = hash;
		}

		let bc = backend.blockchain();

		// Check that we can properly access values when there is reference count
		// but no value.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);

		// Block 1 gets two additional pins (three in total, counting the one from the
		// insertion loop above).
		backend.pin_block(blocks[1]).unwrap();
		backend.pin_block(blocks[1]).unwrap();

		// Finalize all blocks. This will trigger pruning.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
		for i in 1..5 {
			op.mark_finalized(blocks[i], Some(build_justification(i.try_into().unwrap())))
				.unwrap();
		}
		backend.commit_operation(op).unwrap();

		// Block 0, 1, 2, 3 are pinned, so all values should be cached.
		// Block 4 is inside the pruning window, its value is in db.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0.into(), ())]),
			bc.body(blocks[0]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(1))),
			bc.justifications(blocks[1]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
			bc.body(blocks[2]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(2))),
			bc.justifications(blocks[2]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
			bc.body(blocks[3]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(3))),
			bc.justifications(blocks[3]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);

		// Unpin all blocks once. Values of blocks with a single pin are removed from
		// the cache.
		for block in &blocks {
			backend.unpin_block(*block);
		}

		assert!(bc.body(blocks[0]).unwrap().is_none());
		// Block 1 was pinned three times and unpinned once, so it should still be cached.
		assert!(bc.body(blocks[1]).unwrap().is_some());
		assert!(bc.justifications(blocks[1]).unwrap().is_some());
		// Headers should also be available while pinned
		assert!(bc.header(blocks[1]).ok().flatten().is_some());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.justifications(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());
		assert!(bc.justifications(blocks[3]).unwrap().is_none());

		// Two more unpins release the remaining references on block 1: after the first
		// it is still cached, after the second it is removed.
		backend.unpin_block(blocks[1]);
		assert!(bc.body(blocks[1]).unwrap().is_some());
		assert!(bc.justifications(blocks[1]).unwrap().is_some());
		backend.unpin_block(blocks[1]);
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.justifications(blocks[1]).unwrap().is_none());

		// Block 4 is inside the pruning window and still kept
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);

		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4 -> 5
		let hash = insert_block(
			&backend,
			5,
			prev_hash,
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(5.into(), ())],
			None,
		)
		.unwrap();
		blocks.push(hash);

		// Pin block 4 so its values survive the pruning triggered by finalizing block 5.
		backend.pin_block(blocks[4]).unwrap();
		// Mark block 5 as finalized.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[5]).unwrap();
		op.mark_finalized(blocks[5], Some(build_justification(5))).unwrap();
		backend.commit_operation(op).unwrap();

		// Blocks 0..=3 are fully unpinned and pruned by now.
		assert!(bc.body(blocks[0]).unwrap().is_none());
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(5.into(), ())]),
			bc.body(blocks[5]).unwrap()
		);
		assert!(bc.header(blocks[5]).ok().flatten().is_some());

		backend.unpin_block(blocks[4]);
		assert!(bc.body(blocks[4]).unwrap().is_none());
		assert!(bc.justifications(blocks[4]).unwrap().is_none());

		// Append a justification to block 5.
		backend.append_justification(blocks[5], ([0, 0, 0, 1], vec![42])).unwrap();

		let hash = insert_block(
			&backend,
			6,
			blocks[5],
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(6.into(), ())],
			None,
		)
		.unwrap();
		blocks.push(hash);

		// Pin block 5 so it gets loaded into the cache on prune
		backend.pin_block(blocks[5]).unwrap();

		// Finalize block 6 so block 5 gets pruned. Since it is pinned both justifications should be
		// in memory.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[6]).unwrap();
		op.mark_finalized(blocks[6], None).unwrap();
		backend.commit_operation(op).unwrap();

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(5.into(), ())]),
			bc.body(blocks[5]).unwrap()
		);
		assert!(bc.header(blocks[5]).ok().flatten().is_some());
		let mut expected = Justifications::from(build_justification(5));
		expected.append(([0, 0, 0, 1], vec![42]));
		assert_eq!(Some(expected), bc.justifications(blocks[5]).unwrap());
	}
6025
	#[test]
	fn test_pinned_blocks_on_finalize_with_fork() {
		// Like `test_pinned_blocks_on_finalize`, but additionally checks that a pinned
		// fork block keeps its body cached after its branch is discarded by
		// finalization of the canonical chain.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4
		for i in 0..5 {
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);

			// Avoid block pruning.
			backend.pin_block(blocks[i as usize]).unwrap();

			prev_hash = hash;
		}

		// Insert a fork at the second block.
		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4
		//        \ -> 2 -> 3
		let fork_hash_root = insert_block(
			&backend,
			2,
			blocks[1],
			None,
			H256::random(),
			vec![UncheckedXt::new_transaction(2.into(), ())],
			None,
		)
		.unwrap();
		let fork_hash_3 = insert_block(
			&backend,
			3,
			fork_hash_root,
			None,
			H256::random(),
			vec![
				UncheckedXt::new_transaction(3.into(), ()),
				UncheckedXt::new_transaction(11.into(), ()),
			],
			None,
		)
		.unwrap();

		// Do not prune the fork hash.
		backend.pin_block(fork_hash_3).unwrap();

		// Make block 4 the head before finalizing.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
		op.mark_head(blocks[4]).unwrap();
		backend.commit_operation(op).unwrap();

		// Finalize blocks 1..=4 one commit at a time; this triggers pruning of the
		// fork branch.
		for i in 1..5 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
		}

		// All canonical blocks are pinned, so their bodies stay accessible.
		let bc = backend.blockchain();
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0.into(), ())]),
			bc.body(blocks[0]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
			bc.body(blocks[2]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
			bc.body(blocks[3]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		// Check the fork hashes. The unpinned fork root is gone; the pinned fork tip
		// remains.
		assert_eq!(None, bc.body(fork_hash_root).unwrap());
		assert_eq!(
			Some(vec![
				UncheckedXt::new_transaction(3.into(), ()),
				UncheckedXt::new_transaction(11.into(), ())
			]),
			bc.body(fork_hash_3).unwrap()
		);

		// Unpin all blocks, except the forked one.
		for block in &blocks {
			backend.unpin_block(*block);
		}
		assert!(bc.body(blocks[0]).unwrap().is_none());
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());

		// The fork tip is only dropped once its pin is released.
		assert!(bc.body(fork_hash_3).unwrap().is_some());
		backend.unpin_block(fork_hash_3);
		assert!(bc.body(fork_hash_3).unwrap().is_none());
	}
6140
6141	#[test]
6142	fn prune_blocks_with_empty_predicates_prunes_all() {
6143		// Test backward compatibility: empty predicates means all blocks are pruned
6144		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
6145			BlocksPruning::Some(2),
6146			0,
6147			vec![], // Empty predicates
6148		);
6149
6150		let mut blocks = Vec::new();
6151		let mut prev_hash = Default::default();
6152
6153		// Create 5 blocks
6154		for i in 0..5 {
6155			let hash = insert_block(
6156				&backend,
6157				i,
6158				prev_hash,
6159				None,
6160				Default::default(),
6161				vec![UncheckedXt::new_transaction(i.into(), ())],
6162				None,
6163			)
6164			.unwrap();
6165			blocks.push(hash);
6166			prev_hash = hash;
6167		}
6168
6169		// Justification - but no predicate to preserve it
6170		let justification = (CONS0_ENGINE_ID, vec![1, 2, 3]);
6171
6172		// Finalize blocks, adding justification to block 1
6173		{
6174			let mut op = backend.begin_operation().unwrap();
6175			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
6176			op.mark_finalized(blocks[1], Some(justification.clone())).unwrap();
6177			op.mark_finalized(blocks[2], None).unwrap();
6178			op.mark_finalized(blocks[3], None).unwrap();
6179			op.mark_finalized(blocks[4], None).unwrap();
6180			backend.commit_operation(op).unwrap();
6181		}
6182
6183		let bc = backend.blockchain();
6184
6185		// All blocks outside pruning window should be pruned, even with justification
6186		assert_eq!(None, bc.body(blocks[0]).unwrap());
6187		assert_eq!(None, bc.body(blocks[1]).unwrap()); // Has justification but no predicate
6188		assert_eq!(None, bc.body(blocks[2]).unwrap());
6189
6190		// Blocks 3 and 4 are within the pruning window
6191		assert!(bc.body(blocks[3]).unwrap().is_some());
6192		assert!(bc.body(blocks[4]).unwrap().is_some());
6193	}
6194
6195	#[test]
6196	fn prune_blocks_multiple_filters_or_logic() {
6197		// Test that multiple filters use OR logic: if ANY filter matches, block is kept
6198		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
6199			BlocksPruning::Some(2),
6200			0,
6201			vec![
6202				Arc::new(|j: &Justifications| j.get(CONS0_ENGINE_ID).is_some()),
6203				Arc::new(|j: &Justifications| j.get(CONS1_ENGINE_ID).is_some()),
6204			],
6205		);
6206
6207		let mut blocks = Vec::new();
6208		let mut prev_hash = Default::default();
6209
6210		// Create 7 blocks
6211		for i in 0..7 {
6212			let hash = insert_block(
6213				&backend,
6214				i,
6215				prev_hash,
6216				None,
6217				Default::default(),
6218				vec![UncheckedXt::new_transaction(i.into(), ())],
6219				None,
6220			)
6221			.unwrap();
6222			blocks.push(hash);
6223			prev_hash = hash;
6224		}
6225
6226		let cons0_justification = (CONS0_ENGINE_ID, vec![1, 2, 3]);
6227		let cons1_justification = (CONS1_ENGINE_ID, vec![4, 5, 6]);
6228
6229		// Finalize blocks with different justification patterns
6230		{
6231			let mut op = backend.begin_operation().unwrap();
6232			backend.begin_state_operation(&mut op, blocks[6]).unwrap();
6233			// Block 1: CONS0 only - should be preserved
6234			op.mark_finalized(blocks[1], Some(cons0_justification.clone())).unwrap();
6235			// Block 2: CONS1 only - should be preserved
6236			op.mark_finalized(blocks[2], Some(cons1_justification.clone())).unwrap();
6237			// Block 3: No justification - should be pruned
6238			op.mark_finalized(blocks[3], None).unwrap();
6239			// Block 4: Random/unknown engine ID - should be pruned
6240			op.mark_finalized(blocks[4], Some(([9, 9, 9, 9], vec![7, 8, 9]))).unwrap();
6241			op.mark_finalized(blocks[5], None).unwrap();
6242			op.mark_finalized(blocks[6], None).unwrap();
6243			backend.commit_operation(op).unwrap();
6244		}
6245
6246		let bc = backend.blockchain();
6247
6248		// Block 0 should be pruned (outside window, no justification)
6249		assert_eq!(None, bc.body(blocks[0]).unwrap());
6250
6251		// Block 1 should be preserved (has CONS0 justification)
6252		assert!(bc.body(blocks[1]).unwrap().is_some());
6253
6254		// Block 2 should be preserved (has CONS1 justification)
6255		assert!(bc.body(blocks[2]).unwrap().is_some());
6256
6257		// Block 3 should be pruned (no justification)
6258		assert_eq!(None, bc.body(blocks[3]).unwrap());
6259
6260		// Block 4 should be pruned (unknown engine ID)
6261		assert_eq!(None, bc.body(blocks[4]).unwrap());
6262
6263		// Blocks 5 and 6 are within the pruning window
6264		assert!(bc.body(blocks[5]).unwrap().is_some());
6265		assert!(bc.body(blocks[6]).unwrap().is_some());
6266	}
6267
6268	#[test]
6269	fn prune_blocks_filter_only_matches_specific_engine() {
6270		// Test that a filter for one engine ID does NOT preserve blocks with a different engine ID
6271		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
6272			BlocksPruning::Some(2),
6273			0,
6274			vec![Arc::new(|j: &Justifications| j.get(CONS0_ENGINE_ID).is_some())],
6275		);
6276
6277		let mut blocks = Vec::new();
6278		let mut prev_hash = Default::default();
6279
6280		// Create 5 blocks
6281		for i in 0..5 {
6282			let hash = insert_block(
6283				&backend,
6284				i,
6285				prev_hash,
6286				None,
6287				Default::default(),
6288				vec![UncheckedXt::new_transaction(i.into(), ())],
6289				None,
6290			)
6291			.unwrap();
6292			blocks.push(hash);
6293			prev_hash = hash;
6294		}
6295
6296		let cons1_justification = (CONS1_ENGINE_ID, vec![4, 5, 6]);
6297
6298		// Finalize blocks, adding CONS1 justification to block 1
6299		{
6300			let mut op = backend.begin_operation().unwrap();
6301			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
6302			// Block 1 gets CONS1 justification - should NOT be preserved by CONS0 filter
6303			op.mark_finalized(blocks[1], Some(cons1_justification.clone())).unwrap();
6304			op.mark_finalized(blocks[2], None).unwrap();
6305			op.mark_finalized(blocks[3], None).unwrap();
6306			op.mark_finalized(blocks[4], None).unwrap();
6307			backend.commit_operation(op).unwrap();
6308		}
6309
6310		let bc = backend.blockchain();
6311
6312		// Block 0 should be pruned
6313		assert_eq!(None, bc.body(blocks[0]).unwrap());
6314
6315		// Block 1 should also be pruned (CONS1 justification, but only CONS0 filter)
6316		assert_eq!(None, bc.body(blocks[1]).unwrap());
6317
6318		// Block 2 should be pruned
6319		assert_eq!(None, bc.body(blocks[2]).unwrap());
6320
6321		// Blocks 3 and 4 are within the pruning window
6322		assert!(bc.body(blocks[3]).unwrap().is_some());
6323		assert!(bc.body(blocks[4]).unwrap().is_some());
6324	}
6325
6326	/// Insert a header without body as best block. This triggers `MissingBody` gap creation
6327	/// when the parent header exists and `create_gap` is true.
6328	fn insert_header_no_body_as_best(
6329		backend: &Backend<Block>,
6330		number: u64,
6331		parent_hash: H256,
6332	) -> H256 {
6333		use sp_runtime::testing::Digest;
6334
6335		let digest = Digest::default();
6336		let header = Header {
6337			number,
6338			parent_hash,
6339			state_root: Default::default(),
6340			digest,
6341			extrinsics_root: Default::default(),
6342		};
6343
6344		let mut op = backend.begin_operation().unwrap();
6345		// body = None triggers MissingBody gap when parent exists
6346		op.set_block_data(header.clone(), None, None, None, NewBlockState::Best, true)
6347			.unwrap();
6348		backend.commit_operation(op).unwrap();
6349
6350		header.hash()
6351	}
6352
6353	/// Re-open a backend from an existing database with the given blocks pruning mode.
6354	fn reopen_backend(
6355		db: Arc<dyn sp_database::Database<DbHash>>,
6356		blocks_pruning: BlocksPruning,
6357	) -> Backend<Block> {
6358		let state_pruning = match blocks_pruning {
6359			BlocksPruning::KeepAll => PruningMode::ArchiveAll,
6360			BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical,
6361			BlocksPruning::Some(n) => PruningMode::blocks_pruning(n),
6362		};
6363		Backend::<Block>::new(
6364			DatabaseSettings {
6365				trie_cache_maximum_size: Some(16 * 1024 * 1024),
6366				state_pruning: Some(state_pruning),
6367				source: DatabaseSource::Custom { db, require_create_flag: false },
6368				blocks_pruning,
6369				pruning_filters: Default::default(),
6370				metrics_registry: None,
6371			},
6372			0,
6373		)
6374		.unwrap()
6375	}
6376
#[test]
fn missing_body_gap_is_removed_for_non_archive_node() {
	// A non-archive backend: blocks pruning keeps only the most recent blocks.
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(100), 0);
	assert!(!backend.is_archive);

	let genesis_hash = insert_header(&backend, 0, Default::default(), None, Default::default());

	// Import headers 1..=3 without bodies, producing a `MissingBody` gap over 1..=3.
	let mut parent = genesis_hash;
	for number in 1..=3 {
		parent = insert_header_no_body_as_best(&backend, number, parent);
	}

	let gap = backend
		.blockchain()
		.info()
		.block_gap
		.expect("MissingBody gap should have been created");
	assert!(matches!(gap.gap_type, BlockGapType::MissingBody));
	assert_eq!((gap.start, gap.end), (1, 3));

	// Re-open the same database as a non-archive node.
	let reopened = reopen_backend(backend.storage.db.clone(), BlocksPruning::Some(100));
	assert!(!reopened.is_archive);

	// On startup the non-archive node must have dropped the multi-block gap.
	let info = reopened.blockchain().info();
	assert!(
		info.block_gap.is_none(),
		"MissingBody gap should be removed for non-archive nodes, got: {:?}",
		info.block_gap,
	);
}
6410
#[test]
fn missing_body_gap_is_preserved_for_archive_node() {
	// An archive backend keeps everything, so block gaps must survive restarts.
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::KeepAll, 0);
	assert!(backend.is_archive);

	let genesis_hash = insert_header(&backend, 0, Default::default(), None, Default::default());

	// Import headers 1..=3 without bodies, producing a `MissingBody` gap over 1..=3.
	let mut parent = genesis_hash;
	for number in 1..=3 {
		parent = insert_header_no_body_as_best(&backend, number, parent);
	}

	let gap = backend
		.blockchain()
		.info()
		.block_gap
		.expect("MissingBody gap should have been created");
	assert!(matches!(gap.gap_type, BlockGapType::MissingBody));
	assert_eq!((gap.start, gap.end), (1, 3));

	// Re-open the same database as an archive node.
	let reopened = reopen_backend(backend.storage.db.clone(), BlocksPruning::KeepAll);
	assert!(reopened.is_archive);

	// The gap must still be reported, unchanged, after the restart.
	let gap = reopened
		.blockchain()
		.info()
		.block_gap
		.expect("MissingBody gap should be preserved for archive nodes");
	assert!(matches!(gap.gap_type, BlockGapType::MissingBody));
	assert_eq!((gap.start, gap.end), (1, 3));
}
6444
#[test]
fn missing_header_and_body_gap_is_preserved_for_non_archive_node() {
	// A non-archive backend; a `MissingHeaderAndBody` gap originates from warp sync.
	let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(100), 0);
	assert!(!backend.is_archive);

	let _genesis_hash =
		insert_header(&backend, 0, Default::default(), None, Default::default());

	// A disconnected block at height 3 with a fake parent leaves blocks 1..=2
	// entirely missing, i.e. a `MissingHeaderAndBody` gap.
	insert_disconnected_header(&backend, 3, H256::from([200; 32]), Default::default(), true);

	let gap = backend.blockchain().info().block_gap.expect("Gap should have been created");
	assert!(matches!(gap.gap_type, BlockGapType::MissingHeaderAndBody));
	assert_eq!((gap.start, gap.end), (1, 2));

	// Re-open the same database as a non-archive node.
	let reopened = reopen_backend(backend.storage.db.clone(), BlocksPruning::Some(100));
	assert!(!reopened.is_archive);

	// Startup cleanup only targets MissingBody gaps — this one must remain.
	let gap = reopened
		.blockchain()
		.info()
		.block_gap
		.expect("MissingHeaderAndBody gap should be preserved for non-archive nodes");
	assert!(matches!(gap.gap_type, BlockGapType::MissingHeaderAndBody));
	assert_eq!((gap.start, gap.end), (1, 2));
}
6481}