// soil_client/db/mod.rs

1// This file is part of Soil.
2
3// Copyright (C) Soil contributors.
4// Copyright (C) Parity Technologies (UK) Ltd.
5// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
6
7//! Client backend that is backed by a database.
8//!
9//! # Canonicality vs. Finality
10//!
11//! Finality indicates that a block will not be reverted, according to the consensus algorithm,
//! while canonicality indicates that the block may be reverted, but we will be unable to do so,
//! having discarded the heavy state that would be needed for a chain reorganization.
14//!
15//! Finality implies canonicality but not vice-versa.
16
17pub mod state_db;
18
19pub mod offchain;
20
21pub mod bench;
22
23mod children;
24mod parity_db;
25mod pinned_blocks_cache;
26mod record_stats_state;
27mod stats;
28mod trie_cache_metrics;
29#[cfg(any(feature = "rocksdb", test))]
30mod upgrade;
31mod utils;
32
33use linked_hash_map::LinkedHashMap;
34use log::{debug, trace, warn};
35use parking_lot::{Mutex, RwLock};
36use soil_prometheus::Registry;
37use std::{
38	collections::{HashMap, HashSet},
39	io,
40	path::{Path, PathBuf},
41	sync::Arc,
42};
43
44use self::state_db::{IsPruned, LastCanonicalized, StateDb};
45use self::trie_cache_metrics::PrometheusTrieCacheMetrics;
46use self::utils::BLOCK_GAP_CURRENT_VERSION;
47use self::{
48	pinned_blocks_cache::PinnedBlocksCache,
49	record_stats_state::RecordStatsState,
50	stats::StateUsageStats,
51	utils::{meta_keys, read_db, read_meta, remove_from_db, DatabaseType, Meta},
52};
53use crate::blockchain::{
54	Backend as _, CachedHeaderMetadata, DisplacedLeavesAfterFinalization, Error as ClientError,
55	HeaderBackend, HeaderMetadata, HeaderMetadataCache, Result as ClientResult,
56};
57use crate::client_api::{
58	backend::NewBlockState,
59	blockchain::{BlockGap, BlockGapType},
60	leaves::{FinalizationOutcome, LeafSet},
61	utils::is_descendent_of,
62	IoInfo, MemoryInfo, MemorySize, TrieCacheContext, UsageInfo,
63};
64use codec::{Decode, Encode};
65use hash_db::Prefix;
66use subsoil::arithmetic::traits::Saturating;
67use subsoil::core::{
68	offchain::OffchainOverlayedChange,
69	storage::{well_known_keys, ChildInfo},
70};
71use subsoil::database::Transaction;
72use subsoil::runtime::{
73	generic::BlockId,
74	traits::{
75		Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One, SaturatedConversion,
76		Zero,
77	},
78	Justification, Justifications, StateVersion, Storage,
79};
80use subsoil::state_machine::{
81	backend::{AsTrieBackend, Backend as StateBackend},
82	BackendTransaction, ChildStorageCollection, DBValue, IndexOperation, IterArgs,
83	OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, StorageKey,
84	StorageValue, UsageInfo as StateUsageInfo,
85};
86use subsoil::trie::{
87	cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB,
88};
89
90// Re-export the Database trait so that one can pass an implementation of it.
91pub use self::state_db::PruningMode;
92pub use subsoil::database::Database;
93
94pub use bench::BenchmarkingState;
95
/// Filter to determine if a block should be excluded from pruning.
///
/// Note: This filter only affects **block body** (and future header) pruning.
/// It does **not** affect state pruning, which is configured separately.
///
/// A blanket implementation is provided for any
/// `Fn(&Justifications) -> bool + Send + Sync` closure.
pub trait PruningFilter: Send + Sync {
	/// Check if a block with the given justifications should be preserved.
	///
	/// Returns `true` to preserve the block, `false` to allow pruning.
	fn should_retain(&self, justifications: &Justifications) -> bool;
}
106
107impl<F> PruningFilter for F
108where
109	F: Fn(&Justifications) -> bool + Send + Sync,
110{
111	fn should_retain(&self, justifications: &Justifications) -> bool {
112		(self)(justifications)
113	}
114}
115
/// Maximum number of entries kept in `BlockchainDb`'s in-memory header cache
/// (see `cache_header`).
const CACHE_HEADERS: usize = 8;

/// DB-backed patricia trie state, transaction type is an overlay of changes to commit.
pub type DbState<H> =
	subsoil::state_machine::TrieBackend<Arc<dyn subsoil::state_machine::Storage<H>>, H>;

/// Builder for [`DbState`].
pub type DbStateBuilder<Hasher> = subsoil::state_machine::TrieBackendBuilder<
	Arc<dyn subsoil::state_machine::Storage<Hasher>>,
	Hasher,
>;

/// Length of a [`DbHash`] in bytes (H256 = 32 bytes).
const DB_HASH_LEN: usize = 32;

/// Hash type that this backend uses for the database.
pub type DbHash = subsoil::core::H256;
133
/// An extrinsic entry in the database.
///
/// An extrinsic is stored either inline (`Full`) or, when it carries indexed
/// data, split into a header kept in the body index and a payload stored
/// separately under `columns::TRANSACTION`, keyed by `hash` (`Indexed`).
#[derive(Debug, Encode, Decode)]
enum DbExtrinsic<B: BlockT> {
	/// Extrinsic that contains indexed data.
	Indexed {
		/// Hash of the indexed part.
		hash: DbHash,
		/// Extrinsic header.
		header: Vec<u8>,
	},
	/// Complete extrinsic data.
	Full(B::Extrinsic),
}
147
/// A reference tracking state.
///
/// It makes sure that the hash we are using stays pinned in storage
/// until this structure is dropped.
pub struct RefTrackingState<Block: BlockT> {
	/// The wrapped trie-backed state all queries are delegated to.
	state: DbState<HashingFor<Block>>,
	/// Kept so the pinned state can be released via `state_db` on drop.
	storage: Arc<StorageDb<Block>>,
	/// The hash pinned in `state_db`; `None` means nothing to unpin.
	parent_hash: Option<Block::Hash>,
}
157
158impl<B: BlockT> RefTrackingState<B> {
159	fn new(
160		state: DbState<HashingFor<B>>,
161		storage: Arc<StorageDb<B>>,
162		parent_hash: Option<B::Hash>,
163	) -> Self {
164		RefTrackingState { state, parent_hash, storage }
165	}
166}
167
168impl<B: BlockT> Drop for RefTrackingState<B> {
169	fn drop(&mut self) {
170		if let Some(hash) = &self.parent_hash {
171			self.storage.state_db.unpin(hash);
172		}
173	}
174}
175
impl<Block: BlockT> std::fmt::Debug for RefTrackingState<Block> {
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		// Identify the state by the parent block hash it is anchored to.
		write!(f, "Block {:?}", self.parent_hash)
	}
}
181
/// A raw iterator over the `RefTrackingState`.
pub struct RawIter<B: BlockT> {
	/// Iterator of the wrapped `DbState`; all calls delegate to it.
	inner: <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::RawIter,
}

impl<B: BlockT> StorageIterator<HashingFor<B>> for RawIter<B> {
	type Backend = RefTrackingState<B>;
	type Error = <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::Error;

	/// Advance to the next storage key, reading through the wrapped state.
	fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
		self.inner.next_key(&backend.state)
	}

	/// Advance to the next `(key, value)` pair, reading through the wrapped state.
	fn next_pair(
		&mut self,
		backend: &Self::Backend,
	) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
		self.inner.next_pair(&backend.state)
	}

	/// Whether the iteration ran to completion.
	fn was_complete(&self) -> bool {
		self.inner.was_complete()
	}
}
206
// `RefTrackingState` adds no behavior of its own here: every method is a
// direct delegation to the inner `DbState`. The wrapper exists purely to keep
// the parent state pinned (see `Drop`).
impl<B: BlockT> StateBackend<HashingFor<B>> for RefTrackingState<B> {
	type Error = <DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::Error;
	type TrieBackendStorage =
		<DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
	type RawIter = RawIter<B>;

	fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.storage(key)
	}

	fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
		self.state.storage_hash(key)
	}

	fn child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.child_storage(child_info, key)
	}

	fn child_storage_hash(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<B::Hash>, Self::Error> {
		self.state.child_storage_hash(child_info, key)
	}

	fn closest_merkle_value(
		&self,
		key: &[u8],
	) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
		self.state.closest_merkle_value(key)
	}

	fn child_closest_merkle_value(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
		self.state.child_closest_merkle_value(child_info, key)
	}

	fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
		self.state.exists_storage(key)
	}

	fn exists_child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<bool, Self::Error> {
		self.state.exists_child_storage(child_info, key)
	}

	fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_storage_key(key)
	}

	fn next_child_storage_key(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_child_storage_key(child_info, key)
	}

	fn storage_root<'a>(
		&self,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
		state_version: StateVersion,
	) -> (B::Hash, BackendTransaction<HashingFor<B>>) {
		self.state.storage_root(delta, state_version)
	}

	fn child_storage_root<'a>(
		&self,
		child_info: &ChildInfo,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
		state_version: StateVersion,
	) -> (B::Hash, bool, BackendTransaction<HashingFor<B>>) {
		self.state.child_storage_root(child_info, delta, state_version)
	}

	// The inner iterator is re-wrapped so callers see this backend's `RawIter`.
	fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
		self.state.raw_iter(args).map(|inner| RawIter { inner })
	}

	fn register_overlay_stats(&self, stats: &StateMachineStats) {
		self.state.register_overlay_stats(stats);
	}

	fn usage_info(&self) -> StateUsageInfo {
		self.state.usage_info()
	}
}
305
306impl<B: BlockT> AsTrieBackend<HashingFor<B>> for RefTrackingState<B> {
307	type TrieBackendStorage =
308		<DbState<HashingFor<B>> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
309
310	fn as_trie_backend(
311		&self,
312	) -> &subsoil::state_machine::TrieBackend<Self::TrieBackendStorage, HashingFor<B>> {
313		&self.state.as_trie_backend()
314	}
315}
316
/// Database settings.
///
/// Collects everything needed to open the backend: where the database lives,
/// how much state/block history to keep, and optional caching/metrics knobs.
pub struct DatabaseSettings {
	/// The maximum trie cache size in bytes.
	///
	/// If `None` is given, the cache is disabled.
	pub trie_cache_maximum_size: Option<usize>,
	/// Requested state pruning mode.
	pub state_pruning: Option<PruningMode>,
	/// Where to find the database.
	pub source: DatabaseSource,
	/// Block pruning mode.
	///
	/// NOTE: only finalized blocks are subject for removal!
	pub blocks_pruning: BlocksPruning,
	/// Filters to exclude blocks from pruning.
	///
	/// If any filter returns `true` for a block's justifications, the block body
	/// (and in the future, the header) will be preserved even when it falls
	/// outside the pruning window. Does not affect state pruning.
	pub pruning_filters: Vec<Arc<dyn PruningFilter>>,
	/// Prometheus metrics registry.
	pub metrics_registry: Option<Registry>,
}
340
/// Block pruning settings.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BlocksPruning {
	/// Keep full block history, of every block that was ever imported.
	KeepAll,
	/// Keep full finalized block history.
	KeepFinalized,
	/// Keep N recent finalized blocks.
	Some(u32),
}

impl BlocksPruning {
	/// True if this is an archive pruning mode (either KeepAll or KeepFinalized).
	pub fn is_archive(&self) -> bool {
		matches!(self, BlocksPruning::KeepAll | BlocksPruning::KeepFinalized)
	}
}
361
/// Where to find the database.
#[derive(Debug, Clone)]
pub enum DatabaseSource {
	/// Check given path, and see if there is an existing database there. If it's either `RocksDb`
	/// or `ParityDb`, use it. If there is none, create a new instance of `ParityDb`.
	Auto {
		/// Path to the paritydb database.
		paritydb_path: PathBuf,
		/// Path to the rocksdb database.
		rocksdb_path: PathBuf,
		/// Cache size in MiB. Used only by `RocksDb` variant of `DatabaseSource`.
		cache_size: usize,
	},
	/// Load a RocksDB database from a given path. Recommended for most uses.
	#[cfg(feature = "rocksdb")]
	RocksDb {
		/// Path to the database.
		path: PathBuf,
		/// Cache size in MiB.
		cache_size: usize,
	},

	/// Load a ParityDb database from a given path.
	ParityDb {
		/// Path to the database.
		path: PathBuf,
	},

	/// Use a custom already-open database.
	Custom {
		/// The handle to the custom storage.
		db: Arc<dyn Database<DbHash>>,

		/// If set, the `create` flag will be required to open such datasource.
		require_create_flag: bool,
	},
}
399
400impl DatabaseSource {
401	/// Return path for databases that are stored on disk.
402	pub fn path(&self) -> Option<&Path> {
403		match self {
404			// as per https://github.com/paritytech/substrate/pull/9500#discussion_r684312550
405			//
406			// IIUC this is needed for polkadot to create its own dbs, so until it can use parity db
407			// I would think rocksdb, but later parity-db.
408			DatabaseSource::Auto { paritydb_path, .. } => Some(paritydb_path),
409			#[cfg(feature = "rocksdb")]
410			DatabaseSource::RocksDb { path, .. } => Some(path),
411			DatabaseSource::ParityDb { path } => Some(path),
412			DatabaseSource::Custom { .. } => None,
413		}
414	}
415
416	/// Set path for databases that are stored on disk.
417	pub fn set_path(&mut self, p: &Path) -> bool {
418		match self {
419			DatabaseSource::Auto { ref mut paritydb_path, .. } => {
420				*paritydb_path = p.into();
421				true
422			},
423			#[cfg(feature = "rocksdb")]
424			DatabaseSource::RocksDb { ref mut path, .. } => {
425				*path = p.into();
426				true
427			},
428			DatabaseSource::ParityDb { ref mut path } => {
429				*path = p.into();
430				true
431			},
432			DatabaseSource::Custom { .. } => false,
433		}
434	}
435}
436
437impl std::fmt::Display for DatabaseSource {
438	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
439		let name = match self {
440			DatabaseSource::Auto { .. } => "Auto",
441			#[cfg(feature = "rocksdb")]
442			DatabaseSource::RocksDb { .. } => "RocksDb",
443			DatabaseSource::ParityDb { .. } => "ParityDb",
444			DatabaseSource::Custom { .. } => "Custom",
445		};
446		write!(f, "{}", name)
447	}
448}
449
/// Column family indices used by this backend.
///
/// NOTE(review): some ids (e.g. 7, 10) are absent here — presumably retired
/// columns; do not reuse them without checking the on-disk layout/upgrade code.
pub(crate) mod columns {
	pub const META: u32 = super::utils::COLUMN_META;
	pub const STATE: u32 = 1;
	pub const STATE_META: u32 = 2;
	/// maps hashes to lookup keys and numbers to canon hashes.
	pub const KEY_LOOKUP: u32 = 3;
	/// Block headers.
	pub const HEADER: u32 = 4;
	/// Block bodies.
	pub const BODY: u32 = 5;
	/// Block justifications.
	pub const JUSTIFICATIONS: u32 = 6;
	/// Auxiliary client data.
	pub const AUX: u32 = 8;
	/// Offchain workers local storage
	pub const OFFCHAIN: u32 = 9;
	/// Transactions
	pub const TRANSACTION: u32 = 11;
	/// Per-block index of `DbExtrinsic` entries (indexed bodies).
	pub const BODY_INDEX: u32 = 12;
}
466
/// A block staged for insertion by a `BlockImportOperation`.
struct PendingBlock<Block: BlockT> {
	/// Header of the block to insert.
	header: Block::Header,
	/// Optional justifications stored alongside the block.
	justifications: Option<Justifications>,
	/// Block body; `None` for header-only import.
	body: Option<Vec<Block::Extrinsic>>,
	/// Indexed transaction payloads, if any.
	indexed_body: Option<Vec<Vec<u8>>>,
	/// Whether the block becomes best/final/normal on import.
	leaf_state: NewBlockState,
	/// Whether the block should be registered in the leaf set.
	register_as_leaf: bool,
}
475
/// Wrapper implementing the `MetaDb` trait required by `state_db`,
/// reading from the `STATE_META` column of the underlying database.
#[derive(Clone)]
struct StateMetaDb(Arc<dyn Database<DbHash>>);

impl self::state_db::MetaDb for StateMetaDb {
	type Error = subsoil::database::error::DatabaseError;

	fn get_meta(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		// `Database::get` is infallible here; a missing key yields `None`.
		Ok(self.0.get(columns::STATE_META, key))
	}
}
487
/// A pending update to the in-memory chain metadata
/// (applied by `BlockchainDb::update_meta`).
struct MetaUpdate<Block: BlockT> {
	/// Hash of the block the update refers to.
	pub hash: Block::Hash,
	/// Number of the block the update refers to.
	pub number: NumberFor<Block>,
	/// Whether this block becomes the new best block.
	pub is_best: bool,
	/// Whether this block becomes the new finalized block.
	pub is_finalized: bool,
	/// Whether the finalized state pointer should also move to this block.
	pub with_state: bool,
}
495
/// Insert `header` into the bounded header `cache`, evicting the oldest
/// entries until at most `CACHE_HEADERS` remain.
///
/// `None` headers are cached too, so repeated lookups of a missing block do
/// not hit the database while the entry stays cached.
fn cache_header<Hash: std::cmp::Eq + std::hash::Hash, Header>(
	cache: &mut LinkedHashMap<Hash, Option<Header>>,
	hash: Hash,
	header: Option<Header>,
) {
	cache.insert(hash, header);
	while cache.len() > CACHE_HEADERS {
		// Evict in insertion/refresh order: front is the stalest entry.
		cache.pop_front();
	}
}
506
/// Block database
pub struct BlockchainDb<Block: BlockT> {
	/// Handle to the underlying key-value database.
	db: Arc<dyn Database<DbHash>>,
	/// In-memory chain metadata (best/finalized/genesis, block gap).
	meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
	/// Set of current chain leaves, persisted under `meta_keys::LEAF_PREFIX`.
	leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
	/// Cache of header metadata keyed by hash.
	header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
	/// Bounded LRU-ish cache of full headers (see `cache_header`).
	header_cache: Mutex<LinkedHashMap<Block::Hash, Option<Block::Header>>>,
	/// Cache of bodies/justifications for pinned blocks.
	pinned_blocks_cache: Arc<RwLock<PinnedBlocksCache<Block>>>,
}
516
impl<Block: BlockT> BlockchainDb<Block> {
	/// Create the blockchain wrapper, loading chain meta and the leaf set
	/// from the database.
	fn new(db: Arc<dyn Database<DbHash>>) -> ClientResult<Self> {
		let meta = read_meta::<Block>(&*db, columns::HEADER)?;
		let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
		Ok(BlockchainDb {
			db,
			leaves: RwLock::new(leaves),
			meta: Arc::new(RwLock::new(meta)),
			header_metadata_cache: Arc::new(HeaderMetadataCache::default()),
			header_cache: Default::default(),
			pinned_blocks_cache: Arc::new(RwLock::new(PinnedBlocksCache::new())),
		})
	}

	/// Apply a `MetaUpdate` to the in-memory chain metadata.
	fn update_meta(&self, update: MetaUpdate<Block>) {
		let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update;
		let mut meta = self.meta.write();
		// Block number zero is by definition the genesis block.
		if number.is_zero() {
			meta.genesis_hash = hash;
		}

		if is_best {
			meta.best_number = number;
			meta.best_hash = hash;
		}

		if is_finalized {
			// Move the finalized-state pointer only when the block carries state.
			if with_state {
				meta.finalized_state = Some((hash, number));
			}
			meta.finalized_number = number;
			meta.finalized_hash = hash;
		}
	}

	/// Replace the recorded block gap (`None` clears it).
	fn update_block_gap(&self, gap: Option<BlockGap<NumberFor<Block>>>) {
		let mut meta = self.meta.write();
		meta.block_gap = gap;
	}

	/// Empty the cache of pinned items.
	fn clear_pinning_cache(&self) {
		self.pinned_blocks_cache.write().clear();
	}

	/// Load a justification into the cache of pinned items.
	/// Reference count of the item will not be increased. Use this
	/// to load values for items into the cache which have already been pinned.
	fn insert_justifications_if_pinned(&self, hash: Block::Hash, justification: Justification) {
		let mut cache = self.pinned_blocks_cache.write();
		// Only populate entries that were pinned beforehand.
		if !cache.contains(hash) {
			return;
		}

		let justifications = Justifications::from(justification);
		cache.insert_justifications(hash, Some(justifications));
	}

	/// Load a justification from the db into the cache of pinned items.
	/// Reference count of the item will not be increased. Use this
	/// to load values for items into the cache which have already been pinned.
	fn insert_persisted_justifications_if_pinned(&self, hash: Block::Hash) -> ClientResult<()> {
		let mut cache = self.pinned_blocks_cache.write();
		if !cache.contains(hash) {
			return Ok(());
		}

		let justifications = self.justifications_uncached(hash)?;
		cache.insert_justifications(hash, justifications);
		Ok(())
	}

	/// Load a block body from the db into the cache of pinned items.
	/// Reference count of the item will not be increased. Use this
	/// to load values for items into the cache which have already been pinned.
	fn insert_persisted_body_if_pinned(&self, hash: Block::Hash) -> ClientResult<()> {
		let mut cache = self.pinned_blocks_cache.write();
		if !cache.contains(hash) {
			return Ok(());
		}

		let body = self.body_uncached(hash)?;
		cache.insert_body(hash, body);
		Ok(())
	}

	/// Bump reference count for pinned item.
	fn bump_ref(&self, hash: Block::Hash) {
		self.pinned_blocks_cache.write().pin(hash);
	}

	/// Decrease reference count for pinned item and remove if reference count is 0.
	fn unpin(&self, hash: Block::Hash) {
		self.pinned_blocks_cache.write().unpin(hash);
	}

	/// Read and decode justifications straight from the database,
	/// bypassing the pinned-blocks cache.
	fn justifications_uncached(&self, hash: Block::Hash) -> ClientResult<Option<Justifications>> {
		match read_db(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::JUSTIFICATIONS,
			BlockId::<Block>::Hash(hash),
		)? {
			Some(justifications) => match Decode::decode(&mut &justifications[..]) {
				Ok(justifications) => Ok(Some(justifications)),
				Err(err) => {
					return Err(crate::blockchain::Error::Backend(format!(
						"Error decoding justifications: {err}"
					)))
				},
			},
			None => Ok(None),
		}
	}

	/// Read and decode a block body straight from the database, bypassing the
	/// pinned-blocks cache. Tries the plain `BODY` column first, then
	/// reassembles indexed bodies from `BODY_INDEX` + `TRANSACTION`.
	fn body_uncached(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
		if let Some(body) =
			read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::<Block>(hash))?
		{
			// Plain body
			match Decode::decode(&mut &body[..]) {
				Ok(body) => return Ok(Some(body)),
				Err(err) => {
					return Err(crate::blockchain::Error::Backend(format!(
						"Error decoding body: {err}"
					)))
				},
			}
		}

		if let Some(index) = read_db(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::BODY_INDEX,
			BlockId::Hash::<Block>(hash),
		)? {
			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
				Ok(index) => {
					let mut body = Vec::new();
					for ex in index {
						match ex {
							DbExtrinsic::Indexed { hash, header } => {
								match self.db.get(columns::TRANSACTION, hash.as_ref()) {
									Some(t) => {
										// Re-join the stored header with the indexed
										// payload before decoding the extrinsic.
										let mut input =
											utils::join_input(header.as_ref(), t.as_ref());
										let ex = Block::Extrinsic::decode(&mut input).map_err(
											|err| {
												crate::blockchain::Error::Backend(format!(
													"Error decoding indexed extrinsic: {err}"
												))
											},
										)?;
										body.push(ex);
									},
									None => {
										return Err(crate::blockchain::Error::Backend(format!(
											"Missing indexed transaction {hash:?}"
										)))
									},
								};
							},
							DbExtrinsic::Full(ex) => {
								body.push(ex);
							},
						}
					}
					return Ok(Some(body));
				},
				Err(err) => {
					return Err(crate::blockchain::Error::Backend(format!(
						"Error decoding body list: {err}",
					)))
				},
			}
		}
		Ok(None)
	}
}
696
697impl<Block: BlockT> crate::client_api::blockchain::HeaderBackend<Block> for BlockchainDb<Block> {
698	fn header(&self, hash: Block::Hash) -> ClientResult<Option<Block::Header>> {
699		let mut cache = self.header_cache.lock();
700		if let Some(result) = cache.get_refresh(&hash) {
701			return Ok(result.clone());
702		}
703		let header = utils::read_header(
704			&*self.db,
705			columns::KEY_LOOKUP,
706			columns::HEADER,
707			BlockId::<Block>::Hash(hash),
708		)?;
709		cache_header(&mut cache, hash, header.clone());
710		Ok(header)
711	}
712
713	fn info(&self) -> crate::client_api::blockchain::Info<Block> {
714		let meta = self.meta.read();
715		crate::client_api::blockchain::Info {
716			best_hash: meta.best_hash,
717			best_number: meta.best_number,
718			genesis_hash: meta.genesis_hash,
719			finalized_hash: meta.finalized_hash,
720			finalized_number: meta.finalized_number,
721			finalized_state: meta.finalized_state,
722			number_leaves: self.leaves.read().count(),
723			block_gap: meta.block_gap,
724		}
725	}
726
727	fn status(
728		&self,
729		hash: Block::Hash,
730	) -> ClientResult<crate::client_api::blockchain::BlockStatus> {
731		match self.header(hash)?.is_some() {
732			true => Ok(crate::client_api::blockchain::BlockStatus::InChain),
733			false => Ok(crate::client_api::blockchain::BlockStatus::Unknown),
734		}
735	}
736
737	fn number(&self, hash: Block::Hash) -> ClientResult<Option<NumberFor<Block>>> {
738		Ok(self.header_metadata(hash).ok().map(|header_metadata| header_metadata.number))
739	}
740
741	fn hash(&self, number: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
742		Ok(utils::read_header::<Block>(
743			&*self.db,
744			columns::KEY_LOOKUP,
745			columns::HEADER,
746			BlockId::Number(number),
747		)?
748		.map(|header| header.hash()))
749	}
750}
751
impl<Block: BlockT> crate::client_api::blockchain::Backend<Block> for BlockchainDb<Block> {
	/// Fetch a block body, serving pinned blocks from the cache first.
	fn body(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
		let cache = self.pinned_blocks_cache.read();
		if let Some(result) = cache.body(&hash) {
			return Ok(result.clone());
		}

		self.body_uncached(hash)
	}

	/// Fetch justifications, serving pinned blocks from the cache first.
	fn justifications(&self, hash: Block::Hash) -> ClientResult<Option<Justifications>> {
		let cache = self.pinned_blocks_cache.read();
		if let Some(result) = cache.justifications(&hash) {
			return Ok(result.clone());
		}

		self.justifications_uncached(hash)
	}

	/// Hash of the last finalized block (from the in-memory meta).
	fn last_finalized(&self) -> ClientResult<Block::Hash> {
		Ok(self.meta.read().finalized_hash)
	}

	/// Hashes of the current chain leaves.
	fn leaves(&self) -> ClientResult<Vec<Block::Hash>> {
		Ok(self.leaves.read().hashes())
	}

	/// Read the children of `parent_hash` from the `META` column.
	fn children(&self, parent_hash: Block::Hash) -> ClientResult<Vec<Block::Hash>> {
		children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash)
	}

	/// Look up an indexed transaction payload by its hash.
	fn indexed_transaction(&self, hash: Block::Hash) -> ClientResult<Option<Vec<u8>>> {
		Ok(self.db.get(columns::TRANSACTION, hash.as_ref()))
	}

	/// Cheaper existence check for an indexed transaction.
	fn has_indexed_transaction(&self, hash: Block::Hash) -> ClientResult<bool> {
		Ok(self.db.contains(columns::TRANSACTION, hash.as_ref()))
	}

	/// Return only the indexed payloads of a block's body, in body order.
	fn block_indexed_body(&self, hash: Block::Hash) -> ClientResult<Option<Vec<Vec<u8>>>> {
		let body = match read_db(
			&*self.db,
			columns::KEY_LOOKUP,
			columns::BODY_INDEX,
			BlockId::<Block>::Hash(hash),
		)? {
			Some(body) => body,
			None => return Ok(None),
		};
		match Vec::<DbExtrinsic<Block>>::decode(&mut &body[..]) {
			Ok(index) => {
				let mut transactions = Vec::new();
				for ex in index.into_iter() {
					// Full extrinsics are skipped; only indexed payloads are returned.
					if let DbExtrinsic::Indexed { hash, .. } = ex {
						match self.db.get(columns::TRANSACTION, hash.as_ref()) {
							Some(t) => transactions.push(t),
							None => {
								return Err(crate::blockchain::Error::Backend(format!(
									"Missing indexed transaction {hash:?}",
								)))
							},
						}
					}
				}
				Ok(Some(transactions))
			},
			Err(err) => {
				Err(crate::blockchain::Error::Backend(format!("Error decoding body list: {err}")))
			},
		}
	}
}
824
impl<Block: BlockT> HeaderMetadata<Block> for BlockchainDb<Block> {
	type Error = crate::blockchain::Error;

	/// Fetch header metadata, populating the metadata cache on a miss by
	/// reading the full header from the database.
	fn header_metadata(
		&self,
		hash: Block::Hash,
	) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
		self.header_metadata_cache.header_metadata(hash).map_or_else(
			|| {
				self.header(hash)?
					.map(|header| {
						let header_metadata = CachedHeaderMetadata::from(&header);
						self.header_metadata_cache
							.insert_header_metadata(header_metadata.hash, header_metadata.clone());
						header_metadata
					})
					.ok_or_else(|| {
						ClientError::UnknownBlock(format!(
							"Header was not found in the database: {hash:?}",
						))
					})
			},
			Ok,
		)
	}

	fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
		self.header_metadata_cache.insert_header_metadata(hash, metadata)
	}

	/// Drop a block from both the header cache and the metadata cache.
	fn remove_header_metadata(&self, hash: Block::Hash) {
		self.header_cache.lock().remove(&hash);
		self.header_metadata_cache.remove_header_metadata(hash);
	}
}
860
/// Database transaction
///
/// Accumulates all changes of a single block import; nothing is written to
/// the database until the operation is committed by the backend.
pub struct BlockImportOperation<Block: BlockT> {
	/// Parent state the import is built on (with usage-stat recording).
	old_state: RecordStatsState<RefTrackingState<Block>, Block>,
	/// Trie node changes to apply to the state column.
	db_updates: PrefixedMemoryDB<HashingFor<Block>>,
	/// Top-level storage key/value changes.
	storage_updates: StorageCollection,
	/// Child-trie storage changes.
	child_storage_updates: ChildStorageCollection,
	/// Offchain storage changes (drained into the transaction on commit).
	offchain_storage_updates: OffchainChangesCollection,
	/// The block staged for insertion, if any (at most one per operation).
	pending_block: Option<PendingBlock<Block>>,
	/// Auxiliary key/value ops; `None` value means deletion.
	aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
	/// Blocks to mark finalized, each with an optional justification.
	finalized_blocks: Vec<(Block::Hash, Option<Justification>)>,
	/// New head to set, if requested (at most one per operation).
	set_head: Option<Block::Hash>,
	/// Whether state changes should be committed for this block.
	commit_state: bool,
	/// Whether a block gap may be created by this import.
	create_gap: bool,
	/// Whether this operation resets the whole storage (see `reset_storage`).
	reset_storage: bool,
	/// Transaction-index operations for this block.
	index_ops: Vec<IndexOperation>,
}
877
impl<Block: BlockT> BlockImportOperation<Block> {
	/// Drain the accumulated offchain changes into `transaction`.
	fn apply_offchain(&mut self, transaction: &mut Transaction<DbHash>) {
		let mut count = 0;
		for ((prefix, key), value_operation) in self.offchain_storage_updates.drain(..) {
			count += 1;
			// Offchain entries are keyed by `prefix ++ key` in one column.
			let key = self::offchain::concatenate_prefix_and_key(&prefix, &key);
			match value_operation {
				OffchainOverlayedChange::SetValue(val) => {
					transaction.set_from_vec(columns::OFFCHAIN, &key, val)
				},
				OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key),
			}
		}

		if count > 0 {
			log::debug!(target: "soil_offchain", "Applied {count} offchain indexing changes.");
		}
	}

	/// Drain the accumulated auxiliary ops into `transaction`;
	/// `None` values are deletions.
	fn apply_aux(&mut self, transaction: &mut Transaction<DbHash>) {
		for (key, maybe_val) in self.aux_ops.drain(..) {
			match maybe_val {
				Some(val) => transaction.set_from_vec(columns::AUX, &key, val),
				None => transaction.remove(columns::AUX, &key),
			}
		}
	}

	/// Compute the full storage root of `storage` against the parent state and
	/// stage the resulting trie changes in `self.db_updates`.
	///
	/// Fails with `InvalidState` if the top trie contains child-storage keys.
	fn apply_new_state(
		&mut self,
		storage: Storage,
		state_version: StateVersion,
	) -> ClientResult<Block::Hash> {
		if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(k)) {
			return Err(crate::blockchain::Error::InvalidState);
		}

		let child_delta = storage.children_default.values().map(|child_content| {
			(
				&child_content.child_info,
				child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
			)
		});

		let (root, transaction) = self.old_state.full_storage_root(
			storage.top.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
			child_delta,
			state_version,
		);

		self.db_updates = transaction;
		Ok(root)
	}
}
932
933impl<Block: BlockT> crate::client_api::backend::BlockImportOperation<Block>
934	for BlockImportOperation<Block>
935{
936	type State = RecordStatsState<RefTrackingState<Block>, Block>;
937
938	fn state(&self) -> ClientResult<Option<&Self::State>> {
939		Ok(Some(&self.old_state))
940	}
941
942	fn set_block_data(
943		&mut self,
944		header: Block::Header,
945		body: Option<Vec<Block::Extrinsic>>,
946		indexed_body: Option<Vec<Vec<u8>>>,
947		justifications: Option<Justifications>,
948		leaf_state: NewBlockState,
949		register_as_leaf: bool,
950	) -> ClientResult<()> {
951		assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
952		self.pending_block = Some(PendingBlock {
953			header,
954			body,
955			indexed_body,
956			justifications,
957			leaf_state,
958			register_as_leaf,
959		});
960		Ok(())
961	}
962
963	fn update_db_storage(
964		&mut self,
965		update: PrefixedMemoryDB<HashingFor<Block>>,
966	) -> ClientResult<()> {
967		self.db_updates = update;
968		Ok(())
969	}
970
971	fn reset_storage(
972		&mut self,
973		storage: Storage,
974		state_version: StateVersion,
975	) -> ClientResult<Block::Hash> {
976		let root = self.apply_new_state(storage, state_version)?;
977		self.commit_state = true;
978		self.reset_storage = true;
979		Ok(root)
980	}
981
982	fn set_genesis_state(
983		&mut self,
984		storage: Storage,
985		commit: bool,
986		state_version: StateVersion,
987	) -> ClientResult<Block::Hash> {
988		let root = self.apply_new_state(storage, state_version)?;
989		self.commit_state = commit;
990		Ok(root)
991	}
992
993	fn insert_aux<I>(&mut self, ops: I) -> ClientResult<()>
994	where
995		I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
996	{
997		self.aux_ops.append(&mut ops.into_iter().collect());
998		Ok(())
999	}
1000
1001	fn update_storage(
1002		&mut self,
1003		update: StorageCollection,
1004		child_update: ChildStorageCollection,
1005	) -> ClientResult<()> {
1006		self.storage_updates = update;
1007		self.child_storage_updates = child_update;
1008		Ok(())
1009	}
1010
	fn update_offchain_storage(
		&mut self,
		offchain_update: OffchainChangesCollection,
	) -> ClientResult<()> {
		// Stage offchain storage changes to be applied when the operation commits.
		self.offchain_storage_updates = offchain_update;
		Ok(())
	}
1018
	fn mark_finalized(
		&mut self,
		block: Block::Hash,
		justification: Option<Justification>,
	) -> ClientResult<()> {
		// Several blocks may be finalized within one operation; they are
		// processed in the order they were queued.
		self.finalized_blocks.push((block, justification));
		Ok(())
	}
1027
	fn mark_head(&mut self, hash: Block::Hash) -> ClientResult<()> {
		// Only a single head update may be queued per operation.
		assert!(self.set_head.is_none(), "Only one set head per operation is allowed");
		self.set_head = Some(hash);
		Ok(())
	}
1033
	fn update_transaction_index(&mut self, index_ops: Vec<IndexOperation>) -> ClientResult<()> {
		// Stage transaction-index operations applied to the body at commit time.
		self.index_ops = index_ops;
		Ok(())
	}
1038
	fn set_create_gap(&mut self, create_gap: bool) {
		// Controls whether the commit path may record a new block gap.
		self.create_gap = create_gap;
	}
1042}
1043
/// State storage used by the backend: the raw key/value database plus the
/// pruning-aware state database layered on top of it.
struct StorageDb<Block: BlockT> {
	pub db: Arc<dyn Database<DbHash>>,
	pub state_db: StateDb<Block::Hash, Vec<u8>, StateMetaDb>,
	// When the database does not support reference counting, node keys are
	// stored prefixed (see the `Storage` impl below).
	prefix_keys: bool,
}
1049
1050impl<Block: BlockT> subsoil::state_machine::Storage<HashingFor<Block>> for StorageDb<Block> {
1051	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
1052		if self.prefix_keys {
1053			let key = prefixed_key::<HashingFor<Block>>(key, prefix);
1054			self.state_db.get(&key, self)
1055		} else {
1056			self.state_db.get(key.as_ref(), self)
1057		}
1058		.map_err(|e| format!("Database backend error: {e:?}"))
1059	}
1060}
1061
1062impl<Block: BlockT> self::state_db::NodeDb for StorageDb<Block> {
1063	type Error = io::Error;
1064	type Key = [u8];
1065
1066	fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
1067		Ok(self.db.get(columns::STATE, key))
1068	}
1069}
1070
/// In-memory genesis state, kept around when the genesis block is imported
/// without committing its state to the database.
struct DbGenesisStorage<Block: BlockT> {
	root: Block::Hash,
	storage: PrefixedMemoryDB<HashingFor<Block>>,
}
1075
1076impl<Block: BlockT> DbGenesisStorage<Block> {
1077	pub fn new(root: Block::Hash, storage: PrefixedMemoryDB<HashingFor<Block>>) -> Self {
1078		DbGenesisStorage { root, storage }
1079	}
1080}
1081
1082impl<Block: BlockT> subsoil::state_machine::Storage<HashingFor<Block>> for DbGenesisStorage<Block> {
1083	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
1084		use hash_db::HashDB;
1085		Ok(self.storage.get(key, prefix))
1086	}
1087}
1088
/// Trie storage for an empty state; holds only the root hash of the empty trie.
struct EmptyStorage<Block: BlockT>(pub Block::Hash);
1090
1091impl<Block: BlockT> EmptyStorage<Block> {
1092	pub fn new() -> Self {
1093		let mut root = Block::Hash::default();
1094		let mut mdb = MemoryDB::<HashingFor<Block>>::default();
1095		// both triedbmut are the same on empty storage.
1096		subsoil::trie::trie_types::TrieDBMutBuilderV1::<HashingFor<Block>>::new(
1097			&mut mdb, &mut root,
1098		)
1099		.build();
1100		EmptyStorage(root)
1101	}
1102}
1103
impl<Block: BlockT> subsoil::state_machine::Storage<HashingFor<Block>> for EmptyStorage<Block> {
	fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result<Option<DBValue>, String> {
		// Empty storage contains no trie nodes by definition.
		Ok(None)
	}
}
1109
/// Frozen `value` at time `at`.
///
/// Used as inner structure under lock in `FrozenForDuration`.
struct Frozen<T: Clone> {
	// Instant at which `value` was last refreshed.
	at: std::time::Instant,
	// Cached value; `None` until the first computation.
	value: Option<T>,
}
1117
/// Some value frozen for period of time.
///
/// If time `duration` not passed since the value was instantiated,
/// current frozen value is returned. Otherwise, you have to provide
/// a new value which will be again frozen for `duration`.
pub(crate) struct FrozenForDuration<T: Clone> {
	// How long a computed value stays fresh.
	duration: std::time::Duration,
	// The cached value and its refresh timestamp, behind a mutex.
	value: parking_lot::Mutex<Frozen<T>>,
}
1127
1128impl<T: Clone> FrozenForDuration<T> {
1129	fn new(duration: std::time::Duration) -> Self {
1130		Self { duration, value: Frozen { at: std::time::Instant::now(), value: None }.into() }
1131	}
1132
1133	fn take_or_else<F>(&self, f: F) -> T
1134	where
1135		F: FnOnce() -> T,
1136	{
1137		let mut lock = self.value.lock();
1138		let now = std::time::Instant::now();
1139		match lock.value.as_ref() {
1140			Some(value) if now.saturating_duration_since(lock.at) <= self.duration => value.clone(),
1141			_ => {
1142				let new_value = f();
1143				lock.at = now;
1144				lock.value = Some(new_value.clone());
1145				new_value
1146			},
1147		}
1148	}
1149}
1150
/// Disk backend.
///
/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all
/// blocks. Otherwise, trie nodes are kept only from some recent blocks.
pub struct Backend<Block: BlockT> {
	// Raw database plus the pruning-aware state database.
	storage: Arc<StorageDb<Block>>,
	// Offchain worker local storage, backed by the same database.
	offchain_storage: offchain::LocalStorage,
	// Headers, bodies, justifications and chain metadata.
	blockchain: BlockchainDb<Block>,
	// How far behind best a non-finalized block must be before it is
	// force-canonicalized (see `force_delayed_canonicalize`).
	canonicalization_delay: u64,
	// NOTE(review): presumably serializes concurrent import operations —
	// usage is outside this chunk, confirm before relying on it.
	import_lock: Arc<RwLock<()>>,
	// `true` when the state pruning mode keeps all historic state.
	is_archive: bool,
	blocks_pruning: BlocksPruning,
	// I/O statistics snapshot, refreshed at most once per freeze period.
	io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>,
	state_usage: Arc<StateUsageStats>,
	// Genesis state kept in memory when it was not committed to the database.
	genesis_state: RwLock<Option<Arc<DbGenesisStorage<Block>>>>,
	shared_trie_cache: Option<subsoil::trie::cache::SharedTrieCache<HashingFor<Block>>>,
	// Filters configured for pruning (semantics defined by `PruningFilter` impls).
	pruning_filters: Vec<Arc<dyn PruningFilter>>,
}
1169
1170impl<Block: BlockT> Backend<Block> {
1171	/// Create a new instance of database backend.
1172	///
1173	/// The pruning window is how old a block must be before the state is pruned.
1174	pub fn new(db_config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult<Self> {
1175		use utils::OpenDbError;
1176
1177		let db_source = &db_config.source;
1178
1179		let (needs_init, db) =
1180			match self::utils::open_database::<Block>(db_source, DatabaseType::Full, false) {
1181				Ok(db) => (false, db),
1182				Err(OpenDbError::DoesNotExist) => {
1183					let db =
1184						self::utils::open_database::<Block>(db_source, DatabaseType::Full, true)?;
1185					(true, db)
1186				},
1187				Err(as_is) => return Err(as_is.into()),
1188			};
1189
1190		Self::from_database(db as Arc<_>, canonicalization_delay, &db_config, needs_init)
1191	}
1192
	/// Reset the shared trie cache.
	///
	/// No-op when the backend was configured without a trie cache.
	pub fn reset_trie_cache(&self) {
		if let Some(cache) = &self.shared_trie_cache {
			cache.reset();
		}
	}
1199
	/// Create new memory-backed client backend for tests.
	///
	/// Keeps the state of the most recent `blocks_pruning` blocks.
	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self {
		Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay)
	}
1205
	/// Create new memory-backed client backend for tests with custom pruning filters.
	///
	/// Convenience wrapper around [`Self::new_test_with_tx_storage_and_filters`]
	/// for a `BlocksPruning::Some` policy.
	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test_with_pruning_filters(
		blocks_pruning: u32,
		canonicalization_delay: u64,
		pruning_filters: Vec<Arc<dyn PruningFilter>>,
	) -> Self {
		Self::new_test_with_tx_storage_and_filters(
			BlocksPruning::Some(blocks_pruning),
			canonicalization_delay,
			pruning_filters,
		)
	}
1219
	/// Create new memory-backed client backend for tests.
	///
	/// Same as [`Self::new_test_with_tx_storage_and_filters`] with no pruning filters.
	#[cfg(any(test, feature = "test-helpers"))]
	pub fn new_test_with_tx_storage(
		blocks_pruning: BlocksPruning,
		canonicalization_delay: u64,
	) -> Self {
		Self::new_test_with_tx_storage_and_filters(
			blocks_pruning,
			canonicalization_delay,
			Default::default(),
		)
	}
1232
1233	/// Create new memory-backed client backend for tests with custom pruning filters.
1234	#[cfg(any(test, feature = "test-helpers"))]
1235	pub fn new_test_with_tx_storage_and_filters(
1236		blocks_pruning: BlocksPruning,
1237		canonicalization_delay: u64,
1238		pruning_filters: Vec<Arc<dyn PruningFilter>>,
1239	) -> Self {
1240		let db = kvdb_memorydb::create(self::utils::NUM_COLUMNS);
1241		let db = subsoil::database::as_database(db);
1242		let state_pruning = match blocks_pruning {
1243			BlocksPruning::KeepAll => PruningMode::ArchiveAll,
1244			BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical,
1245			BlocksPruning::Some(n) => PruningMode::blocks_pruning(n),
1246		};
1247		let db_setting = DatabaseSettings {
1248			trie_cache_maximum_size: Some(16 * 1024 * 1024),
1249			state_pruning: Some(state_pruning),
1250			source: DatabaseSource::Custom { db, require_create_flag: true },
1251			blocks_pruning,
1252			pruning_filters,
1253			metrics_registry: None,
1254		};
1255
1256		Self::new(db_setting, canonicalization_delay).expect("failed to create test-db")
1257	}
1258
	/// Expose the Database that is used by this backend.
	/// The second argument is the Column that stores the State.
	///
	/// Should only be needed for benchmarking.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_db(
		&self,
	) -> (Arc<dyn subsoil::database::Database<DbHash>>, subsoil::database::ColumnId) {
		(self.storage.db.clone(), columns::STATE)
	}
1269
	/// Expose the Storage that is used by this backend.
	///
	/// Returns the pruning-aware state storage as a trait object.
	/// Should only be needed for benchmarking.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_storage(&self) -> Arc<dyn subsoil::state_machine::Storage<HashingFor<Block>>> {
		self.storage.clone()
	}
1277
	/// Expose the shared trie cache that is used by this backend.
	///
	/// `None` when the backend was configured without a trie cache.
	/// Should only be needed for benchmarking.
	#[cfg(feature = "runtime-benchmarks")]
	pub fn expose_shared_trie_cache(
		&self,
	) -> Option<subsoil::trie::cache::SharedTrieCache<HashingFor<Block>>> {
		self.shared_trie_cache.clone()
	}
1287
1288	fn from_database(
1289		db: Arc<dyn Database<DbHash>>,
1290		canonicalization_delay: u64,
1291		config: &DatabaseSettings,
1292		should_init: bool,
1293	) -> ClientResult<Self> {
1294		let mut db_init_transaction = Transaction::new();
1295
1296		let requested_state_pruning = config.state_pruning.clone();
1297		let state_meta_db = StateMetaDb(db.clone());
1298		let map_e = crate::blockchain::Error::from_state_db;
1299
1300		let (state_db_init_commit_set, state_db) = StateDb::open(
1301			state_meta_db,
1302			requested_state_pruning,
1303			!db.supports_ref_counting(),
1304			should_init,
1305		)
1306		.map_err(map_e)?;
1307
1308		apply_state_commit(&mut db_init_transaction, state_db_init_commit_set);
1309
1310		let state_pruning_used = state_db.pruning_mode();
1311		let is_archive_pruning = state_pruning_used.is_archive();
1312		let blockchain = BlockchainDb::new(db.clone())?;
1313
1314		let storage_db =
1315			StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() };
1316
1317		let offchain_storage = offchain::LocalStorage::new(db.clone());
1318
1319		let shared_trie_cache = config.trie_cache_maximum_size.map(|maximum_size| {
1320			let system_memory = sysinfo::System::new_all();
1321			let used_memory = system_memory.used_memory();
1322			let total_memory = system_memory.total_memory();
1323
1324			debug!("Initializing shared trie cache with size {} bytes, {}% of total memory", maximum_size, (maximum_size as f64 / total_memory as f64 * 100.0));
1325			if maximum_size as u64 > total_memory - used_memory {
1326				warn!(
1327					"Not enough memory to initialize shared trie cache. Cache size: {} bytes. System memory: used {} bytes, total {} bytes",
1328					maximum_size, used_memory, total_memory,
1329				);
1330			}
1331
1332			config
1333				.metrics_registry
1334				.as_ref()
1335				.and_then(|registry| PrometheusTrieCacheMetrics::register(registry).ok())
1336				.map(Arc::new)
1337				.map_or_else(
1338					|| SharedTrieCache::new(subsoil::trie::cache::CacheSize::new(maximum_size)),
1339					|metrics| {
1340						SharedTrieCache::with_metrics(
1341							subsoil::trie::cache::CacheSize::new(maximum_size),
1342							metrics,
1343						)
1344					},
1345				)
1346		});
1347
1348		let backend = Backend {
1349			storage: Arc::new(storage_db),
1350			offchain_storage,
1351			blockchain,
1352			canonicalization_delay,
1353			import_lock: Default::default(),
1354			is_archive: is_archive_pruning,
1355			io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)),
1356			state_usage: Arc::new(StateUsageStats::new()),
1357			blocks_pruning: config.blocks_pruning,
1358			genesis_state: RwLock::new(None),
1359			shared_trie_cache,
1360			pruning_filters: config.pruning_filters.clone(),
1361		};
1362
1363		// Older DB versions have no last state key. Check if the state is available and set it.
1364		let info = backend.blockchain.info();
1365		if info.finalized_state.is_none()
1366			&& info.finalized_hash != Default::default()
1367			&& crate::client_api::Backend::have_state_at(
1368				&backend,
1369				info.finalized_hash,
1370				info.finalized_number,
1371			) {
1372			backend.blockchain.update_meta(MetaUpdate {
1373				hash: info.finalized_hash,
1374				number: info.finalized_number,
1375				is_best: info.finalized_hash == info.best_hash,
1376				is_finalized: true,
1377				with_state: true,
1378			});
1379		}
1380
1381		db.commit(db_init_transaction)?;
1382
1383		Ok(backend)
1384	}
1385
	/// Handle setting head within a transaction. `route_to` should be the last
	/// block that existed in the database. `best_to` should be the best block
	/// to be set.
	///
	/// In the case where the new best block is a block to be imported, `route_to`
	/// should be the parent of `best_to`. In the case where we set an existing block
	/// to be best, `route_to` should equal to `best_to`.
	///
	/// Returns the `(enacted, retracted)` block hashes along the tree route
	/// from the current best block to `route_to`.
	fn set_head_with_transaction(
		&self,
		transaction: &mut Transaction<DbHash>,
		route_to: Block::Hash,
		best_to: (NumberFor<Block>, Block::Hash),
	) -> ClientResult<(Vec<Block::Hash>, Vec<Block::Hash>)> {
		let mut enacted = Vec::default();
		let mut retracted = Vec::default();

		let (best_number, best_hash) = best_to;

		let meta = self.blockchain.meta.read();

		// Refuse to move the head further back than the canonicalization delay:
		// the state needed to reorg past that point may already be discarded.
		if meta.best_number.saturating_sub(best_number).saturated_into::<u64>()
			> self.canonicalization_delay
		{
			return Err(crate::blockchain::Error::SetHeadTooOld);
		}

		let parent_exists =
			self.blockchain.status(route_to)? == crate::blockchain::BlockStatus::InChain;

		// Cannot find tree route with empty DB or when imported a detached block.
		if meta.best_hash != Default::default() && parent_exists {
			let tree_route =
				crate::blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?;

			// uncanonicalize: check safety violations and ensure the numbers no longer
			// point to these block hashes in the key mapping.
			for r in tree_route.retracted() {
				// Never retract a finalized block; that would violate finality.
				if r.hash == meta.finalized_hash {
					warn!(
						"Potential safety failure: reverting finalized block {:?}",
						(&r.number, &r.hash)
					);

					return Err(crate::blockchain::Error::NotInFinalizedChain);
				}

				retracted.push(r.hash);
				utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number)?;
			}

			// canonicalize: set the number lookup to map to this block's hash.
			for e in tree_route.enacted() {
				enacted.push(e.hash);
				utils::insert_number_to_key_mapping(
					transaction,
					columns::KEY_LOOKUP,
					e.number,
					e.hash,
				)?;
			}
		}

		// Finally record the new best block and its number-to-hash mapping.
		let lookup_key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?;
		transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key);
		utils::insert_number_to_key_mapping(
			transaction,
			columns::KEY_LOOKUP,
			best_number,
			best_hash,
		)?;

		Ok((enacted, retracted))
	}
1459
1460	fn ensure_sequential_finalization(
1461		&self,
1462		header: &Block::Header,
1463		last_finalized: Option<Block::Hash>,
1464	) -> ClientResult<()> {
1465		let last_finalized =
1466			last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash);
1467		if last_finalized != self.blockchain.meta.read().genesis_hash
1468			&& *header.parent_hash() != last_finalized
1469		{
1470			return Err(crate::blockchain::Error::NonSequentialFinalization(format!(
1471				"Last finalized {last_finalized:?} not parent of {:?}",
1472				header.hash()
1473			)));
1474		}
1475		Ok(())
1476	}
1477
	/// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls
	/// for performance reasons.
	///
	/// Stages the finalization of `hash` into `transaction` (including the
	/// justification, when provided) and returns the metadata update the
	/// caller must apply once the transaction commits.
	fn finalize_block_with_transaction(
		&self,
		transaction: &mut Transaction<DbHash>,
		hash: Block::Hash,
		header: &Block::Header,
		last_finalized: Option<Block::Hash>,
		justification: Option<Justification>,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
		remove_displaced: bool,
	) -> ClientResult<MetaUpdate<Block>> {
		// TODO: ensure best chain contains this block.
		let number = *header.number();
		self.ensure_sequential_finalization(header, last_finalized)?;
		// Whether the state for this block is still available in the backend.
		let with_state = crate::client_api::Backend::have_state_at(self, hash, number);

		self.note_finalized(
			transaction,
			header,
			hash,
			with_state,
			current_transaction_justifications,
			remove_displaced,
		)?;

		if let Some(justification) = justification {
			// Persist the justification and mirror it into the per-transaction
			// map so later calls within this operation can see it.
			transaction.set_from_vec(
				columns::JUSTIFICATIONS,
				&utils::number_and_hash_to_lookup_key(number, hash)?,
				Justifications::from(justification.clone()).encode(),
			);
			current_transaction_justifications.insert(hash, justification);
		}
		Ok(MetaUpdate { hash, number, is_best: false, is_finalized: true, with_state })
	}
1514
1515	// performs forced canonicalization with a delay after importing a non-finalized block.
1516	fn force_delayed_canonicalize(
1517		&self,
1518		transaction: &mut Transaction<DbHash>,
1519	) -> ClientResult<()> {
1520		let best_canonical = match self.storage.state_db.last_canonicalized() {
1521			LastCanonicalized::None => 0,
1522			LastCanonicalized::Block(b) => b,
1523			// Nothing needs to be done when canonicalization is not happening.
1524			LastCanonicalized::NotCanonicalizing => return Ok(()),
1525		};
1526
1527		let info = self.blockchain.info();
1528		let best_number: u64 = self.blockchain.info().best_number.saturated_into();
1529
1530		for to_canonicalize in
1531			best_canonical + 1..=best_number.saturating_sub(self.canonicalization_delay)
1532		{
1533			let hash_to_canonicalize = crate::client_api::blockchain::HeaderBackend::hash(
1534				&self.blockchain,
1535				to_canonicalize.saturated_into(),
1536			)?
1537			.ok_or_else(|| {
1538				let best_hash = info.best_hash;
1539
1540				crate::blockchain::Error::Backend(format!(
1541					"Can't canonicalize missing block number #{to_canonicalize} when for best block {best_hash:?} (#{best_number})",
1542				))
1543			})?;
1544
1545			if !crate::client_api::Backend::have_state_at(
1546				self,
1547				hash_to_canonicalize,
1548				to_canonicalize.saturated_into(),
1549			) {
1550				return Ok(());
1551			}
1552
1553			trace!(target: "db", "Canonicalize block #{to_canonicalize} ({hash_to_canonicalize:?})");
1554			let commit = self.storage.state_db.canonicalize_block(&hash_to_canonicalize).map_err(
1555				crate::blockchain::Error::from_state_db::<
1556					self::state_db::Error<subsoil::database::error::DatabaseError>,
1557				>,
1558			)?;
1559			apply_state_commit(transaction, commit);
1560		}
1561
1562		Ok(())
1563	}
1564
1565	fn try_commit_operation(&self, mut operation: BlockImportOperation<Block>) -> ClientResult<()> {
1566		let mut transaction = Transaction::new();
1567
1568		operation.apply_aux(&mut transaction);
1569		operation.apply_offchain(&mut transaction);
1570
1571		let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len());
1572		let (best_num, mut last_finalized_hash, mut last_finalized_num, mut block_gap) = {
1573			let meta = self.blockchain.meta.read();
1574			(meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap)
1575		};
1576
1577		let mut block_gap_updated = false;
1578
1579		let mut current_transaction_justifications: HashMap<Block::Hash, Justification> =
1580			HashMap::new();
1581		let mut finalized_blocks = operation.finalized_blocks.into_iter().peekable();
1582		while let Some((block_hash, justification)) = finalized_blocks.next() {
1583			let block_header = self.blockchain.expect_header(block_hash)?;
1584			meta_updates.push(self.finalize_block_with_transaction(
1585				&mut transaction,
1586				block_hash,
1587				&block_header,
1588				Some(last_finalized_hash),
1589				justification,
1590				&mut current_transaction_justifications,
1591				finalized_blocks.peek().is_none(),
1592			)?);
1593			last_finalized_hash = block_hash;
1594			last_finalized_num = *block_header.number();
1595		}
1596
1597		let imported = if let Some(pending_block) = operation.pending_block {
1598			let hash = pending_block.header.hash();
1599
1600			let parent_hash = *pending_block.header.parent_hash();
1601			let number = *pending_block.header.number();
1602			let highest_leaf = self
1603				.blockchain
1604				.leaves
1605				.read()
1606				.highest_leaf()
1607				.map(|(n, _)| n)
1608				.unwrap_or(Zero::zero());
1609			let header_exists_in_db =
1610				number <= highest_leaf && self.blockchain.header(hash)?.is_some();
1611			// Body in DB (not incoming block) - needed to update gap when adding body to existing
1612			// header.
1613			let body_exists_in_db = self.blockchain.body(hash)?.is_some();
1614			// Incoming block has body - used for fast sync gap handling.
1615			let incoming_has_body = pending_block.body.is_some();
1616
1617			// blocks are keyed by number + hash.
1618			let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?;
1619
1620			if pending_block.leaf_state.is_best() {
1621				self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?;
1622			};
1623
1624			utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?;
1625
1626			transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode());
1627			if let Some(body) = pending_block.body {
1628				// If we have any index operations we save block in the new format with indexed
1629				// extrinsic headers Otherwise we save the body as a single blob.
1630				if operation.index_ops.is_empty() {
1631					transaction.set_from_vec(columns::BODY, &lookup_key, body.encode());
1632				} else {
1633					let body =
1634						apply_index_ops::<Block>(&mut transaction, body, operation.index_ops);
1635					transaction.set_from_vec(columns::BODY_INDEX, &lookup_key, body);
1636				}
1637			}
1638			if let Some(body) = pending_block.indexed_body {
1639				apply_indexed_body::<Block>(&mut transaction, body);
1640			}
1641			if let Some(justifications) = pending_block.justifications {
1642				transaction.set_from_vec(
1643					columns::JUSTIFICATIONS,
1644					&lookup_key,
1645					justifications.encode(),
1646				);
1647			}
1648
1649			if number.is_zero() {
1650				transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
1651
1652				if operation.commit_state {
1653					transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key);
1654				} else {
1655					// When we don't want to commit the genesis state, we still preserve it in
1656					// memory to bootstrap consensus. It is queried for an initial list of
1657					// authorities, etc.
1658					*self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new(
1659						*pending_block.header.state_root(),
1660						operation.db_updates.clone(),
1661					)));
1662				}
1663			}
1664
1665			let finalized = if operation.commit_state {
1666				let mut changeset: self::state_db::ChangeSet<Vec<u8>> =
1667					self::state_db::ChangeSet::default();
1668				let mut ops: u64 = 0;
1669				let mut bytes: u64 = 0;
1670				let mut removal: u64 = 0;
1671				let mut bytes_removal: u64 = 0;
1672				for (mut key, (val, rc)) in operation.db_updates.drain() {
1673					self.storage.db.sanitize_key(&mut key);
1674					if rc > 0 {
1675						ops += 1;
1676						bytes += key.len() as u64 + val.len() as u64;
1677						if rc == 1 {
1678							changeset.inserted.push((key, val.to_vec()));
1679						} else {
1680							changeset.inserted.push((key.clone(), val.to_vec()));
1681							for _ in 0..rc - 1 {
1682								changeset.inserted.push((key.clone(), Default::default()));
1683							}
1684						}
1685					} else if rc < 0 {
1686						removal += 1;
1687						bytes_removal += key.len() as u64;
1688						if rc == -1 {
1689							changeset.deleted.push(key);
1690						} else {
1691							for _ in 0..-rc {
1692								changeset.deleted.push(key.clone());
1693							}
1694						}
1695					}
1696				}
1697				self.state_usage.tally_writes_nodes(ops, bytes);
1698				self.state_usage.tally_removed_nodes(removal, bytes_removal);
1699
1700				let mut ops: u64 = 0;
1701				let mut bytes: u64 = 0;
1702				for (key, value) in operation
1703					.storage_updates
1704					.iter()
1705					.chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter()))
1706				{
1707					ops += 1;
1708					bytes += key.len() as u64;
1709					if let Some(v) = value.as_ref() {
1710						bytes += v.len() as u64;
1711					}
1712				}
1713				self.state_usage.tally_writes(ops, bytes);
1714				let number_u64 = number.saturated_into::<u64>();
1715				let commit = self
1716					.storage
1717					.state_db
1718					.insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset)
1719					.map_err(
1720						|e: self::state_db::Error<subsoil::database::error::DatabaseError>| {
1721							crate::blockchain::Error::from_state_db(e)
1722						},
1723					)?;
1724				apply_state_commit(&mut transaction, commit);
1725				if number <= last_finalized_num {
1726					// Canonicalize in the db when re-importing existing blocks with state.
1727					let commit = self.storage.state_db.canonicalize_block(&hash).map_err(
1728						crate::blockchain::Error::from_state_db::<
1729							self::state_db::Error<subsoil::database::error::DatabaseError>,
1730						>,
1731					)?;
1732					apply_state_commit(&mut transaction, commit);
1733					meta_updates.push(MetaUpdate {
1734						hash,
1735						number,
1736						is_best: false,
1737						is_finalized: true,
1738						with_state: true,
1739					});
1740				}
1741
1742				// Check if need to finalize. Genesis is always finalized instantly.
1743				let finalized = number_u64 == 0 || pending_block.leaf_state.is_final();
1744				finalized
1745			} else {
1746				(number.is_zero() && last_finalized_num.is_zero())
1747					|| pending_block.leaf_state.is_final()
1748			};
1749
1750			let header = &pending_block.header;
1751			let is_best = pending_block.leaf_state.is_best();
1752			trace!(
1753				target: "db",
1754				"DB Commit {hash:?} ({number}), best={is_best}, state={}, header_in_db={header_exists_in_db} body_in_db={body_exists_in_db} incoming_body={incoming_has_body}, finalized={finalized}",
1755				operation.commit_state,
1756			);
1757
1758			self.state_usage.merge_sm(operation.old_state.usage_info());
1759
1760			// release state reference so that it can be finalized
1761			// VERY IMPORTANT
1762			drop(operation.old_state);
1763
1764			if finalized {
1765				// TODO: ensure best chain contains this block.
1766				self.ensure_sequential_finalization(header, Some(last_finalized_hash))?;
1767				let mut current_transaction_justifications = HashMap::new();
1768				self.note_finalized(
1769					&mut transaction,
1770					header,
1771					hash,
1772					operation.commit_state,
1773					&mut current_transaction_justifications,
1774					true,
1775				)?;
1776			} else {
1777				// canonicalize blocks which are old enough, regardless of finality.
1778				self.force_delayed_canonicalize(&mut transaction)?
1779			}
1780
1781			if !header_exists_in_db {
1782				// Add a new leaf if the block has the potential to be finalized.
1783				if pending_block.register_as_leaf
1784					&& (number > last_finalized_num || last_finalized_num.is_zero())
1785				{
1786					let mut leaves = self.blockchain.leaves.write();
1787					leaves.import(hash, number, parent_hash);
1788					leaves.prepare_transaction(
1789						&mut transaction,
1790						columns::META,
1791						meta_keys::LEAF_PREFIX,
1792					);
1793				}
1794
1795				let mut children = children::read_children(
1796					&*self.storage.db,
1797					columns::META,
1798					meta_keys::CHILDREN_PREFIX,
1799					parent_hash,
1800				)?;
1801				if !children.contains(&hash) {
1802					children.push(hash);
1803					children::write_children(
1804						&mut transaction,
1805						columns::META,
1806						meta_keys::CHILDREN_PREFIX,
1807						parent_hash,
1808						children,
1809					);
1810				}
1811			}
1812
1813			let should_check_block_gap = !header_exists_in_db || !body_exists_in_db;
1814			debug!(
1815				target: "db",
1816				"should_check_block_gap = {should_check_block_gap}",
1817			);
1818
1819			if should_check_block_gap {
1820				let update_gap =
1821					|transaction: &mut Transaction<DbHash>,
1822					 new_gap: BlockGap<NumberFor<Block>>,
1823					 block_gap: &mut Option<BlockGap<NumberFor<Block>>>| {
1824						transaction.set(columns::META, meta_keys::BLOCK_GAP, &new_gap.encode());
1825						transaction.set(
1826							columns::META,
1827							meta_keys::BLOCK_GAP_VERSION,
1828							&BLOCK_GAP_CURRENT_VERSION.encode(),
1829						);
1830						block_gap.replace(new_gap);
1831						debug!(target: "db", "Update block gap. {block_gap:?}");
1832					};
1833
1834				let remove_gap =
1835					|transaction: &mut Transaction<DbHash>,
1836					 block_gap: &mut Option<BlockGap<NumberFor<Block>>>| {
1837						transaction.remove(columns::META, meta_keys::BLOCK_GAP);
1838						transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
1839						*block_gap = None;
1840						debug!(target: "db", "Removed block gap.");
1841					};
1842
1843				if let Some(mut gap) = block_gap {
1844					match gap.gap_type {
1845						BlockGapType::MissingHeaderAndBody => {
1846							// Handle blocks at gap start or immediately following (possibly
1847							// indicating blocks already imported during warp sync where
1848							// start was not updated).
1849							if number == gap.start {
1850								gap.start = number + One::one();
1851								utils::insert_number_to_key_mapping(
1852									&mut transaction,
1853									columns::KEY_LOOKUP,
1854									number,
1855									hash,
1856								)?;
1857								if gap.start > gap.end {
1858									remove_gap(&mut transaction, &mut block_gap);
1859								} else {
1860									update_gap(&mut transaction, gap, &mut block_gap);
1861								}
1862								block_gap_updated = true;
1863							}
1864						},
1865						BlockGapType::MissingBody => {
1866							// Gap increased when syncing the header chain during fast sync.
1867							if number == gap.end + One::one() && !incoming_has_body {
1868								gap.end += One::one();
1869								utils::insert_number_to_key_mapping(
1870									&mut transaction,
1871									columns::KEY_LOOKUP,
1872									number,
1873									hash,
1874								)?;
1875								update_gap(&mut transaction, gap, &mut block_gap);
1876								block_gap_updated = true;
1877							// Gap decreased when downloading the full blocks.
1878							} else if number == gap.start && incoming_has_body {
1879								gap.start += One::one();
1880								if gap.start > gap.end {
1881									remove_gap(&mut transaction, &mut block_gap);
1882								} else {
1883									update_gap(&mut transaction, gap, &mut block_gap);
1884								}
1885								block_gap_updated = true;
1886							}
1887						},
1888					}
1889				} else if operation.create_gap {
1890					if number > best_num + One::one()
1891						&& self.blockchain.header(parent_hash)?.is_none()
1892					{
1893						let gap = BlockGap {
1894							start: best_num + One::one(),
1895							end: number - One::one(),
1896							gap_type: BlockGapType::MissingHeaderAndBody,
1897						};
1898						update_gap(&mut transaction, gap, &mut block_gap);
1899						block_gap_updated = true;
1900						debug!(target: "db", "Detected block gap (warp sync) {block_gap:?}");
1901					} else if number == best_num + One::one()
1902						&& self.blockchain.header(parent_hash)?.is_some()
1903						&& !incoming_has_body
1904					{
1905						let gap = BlockGap {
1906							start: number,
1907							end: number,
1908							gap_type: BlockGapType::MissingBody,
1909						};
1910						update_gap(&mut transaction, gap, &mut block_gap);
1911						block_gap_updated = true;
1912						debug!(target: "db", "Detected block gap (fast sync) {block_gap:?}");
1913					}
1914				}
1915			}
1916
1917			meta_updates.push(MetaUpdate {
1918				hash,
1919				number,
1920				is_best: pending_block.leaf_state.is_best(),
1921				is_finalized: finalized,
1922				with_state: operation.commit_state,
1923			});
1924			Some((pending_block.header, hash))
1925		} else {
1926			None
1927		};
1928
1929		if let Some(set_head) = operation.set_head {
1930			if let Some(header) =
1931				crate::client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)?
1932			{
1933				let number = header.number();
1934				let hash = header.hash();
1935
1936				self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?;
1937
1938				meta_updates.push(MetaUpdate {
1939					hash,
1940					number: *number,
1941					is_best: true,
1942					is_finalized: false,
1943					with_state: false,
1944				});
1945			} else {
1946				return Err(crate::blockchain::Error::UnknownBlock(format!(
1947					"Cannot set head {set_head:?}",
1948				)));
1949			}
1950		}
1951
1952		self.storage.db.commit(transaction)?;
1953
1954		// `reset_storage == true` means the entire state got replaced.
1955		// In this case we optimize the `STATE` column to improve read performance.
1956		if operation.reset_storage {
1957			if let Err(e) = self.storage.db.optimize_db_col(columns::STATE) {
1958				warn!(target: "db", "Failed to optimize database after state import: {e:?}");
1959			}
1960		}
1961
1962		// Apply all in-memory state changes.
1963		// Code beyond this point can't fail.
1964
1965		if let Some((header, hash)) = imported {
1966			trace!(target: "db", "DB Commit done {hash:?}");
1967			let header_metadata = CachedHeaderMetadata::from(&header);
1968			self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata);
1969			cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header));
1970		}
1971
1972		for m in meta_updates {
1973			self.blockchain.update_meta(m);
1974		}
1975		if block_gap_updated {
1976			self.blockchain.update_block_gap(block_gap);
1977		}
1978
1979		Ok(())
1980	}
1981
	/// Write the consequences of a newly finalized block into `transaction`.
	///
	/// Records the new finalized block (and, when `with_state` is `true`, the finalized-state
	/// anchor) in the META column, canonicalizes the block in the state database when it is past
	/// the last canonicalized block, optionally removes displaced branches, and prunes old block
	/// bodies according to the configured pruning mode.
	///
	/// Fails if called with a block which was not a child of the last finalized block.
	///
	/// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls
	/// for performance reasons.
	fn note_finalized(
		&self,
		transaction: &mut Transaction<DbHash>,
		f_header: &Block::Header,
		f_hash: Block::Hash,
		with_state: bool,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
		remove_displaced: bool,
	) -> ClientResult<()> {
		let f_num = *f_header.number();

		let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash)?;
		if with_state {
			// The block carries usable state, so it becomes the new finalized-state anchor.
			transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key.clone());
		}
		transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key);

		// Canonicalize only if the state db has not yet caught up to this block.
		// `NotCanonicalizing` means the pruning mode never canonicalizes, so skip entirely.
		let requires_canonicalization = match self.storage.state_db.last_canonicalized() {
			LastCanonicalized::None => true,
			LastCanonicalized::Block(b) => f_num.saturated_into::<u64>() > b,
			LastCanonicalized::NotCanonicalizing => false,
		};

		if requires_canonicalization
			&& crate::client_api::Backend::have_state_at(self, f_hash, f_num)
		{
			let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err(
				crate::blockchain::Error::from_state_db::<
					self::state_db::Error<subsoil::database::error::DatabaseError>,
				>,
			)?;
			apply_state_commit(transaction, commit);
		}

		if remove_displaced {
			// Leaves (and the blocks leading to them) that are no longer reachable from the
			// newly finalized block.
			let new_displaced = self.blockchain.displaced_leaves_after_finalizing(
				f_hash,
				f_num,
				*f_header.parent_hash(),
			)?;

			self.blockchain.leaves.write().remove_displaced_leaves(FinalizationOutcome::new(
				new_displaced.displaced_leaves.iter().copied(),
			));

			if !matches!(self.blocks_pruning, BlocksPruning::KeepAll) {
				self.prune_displaced_branches(transaction, &new_displaced)?;
			}
		}

		self.prune_blocks(transaction, f_num, current_transaction_justifications)?;

		Ok(())
	}
2040
	/// Prune the body/justifications of the block that falls out of the `BlocksPruning::Some`
	/// retention window after `finalized_number` was finalized.
	///
	/// Before pruning, pinned data is copied into the in-memory pinned-blocks cache, and pruning
	/// filters get a chance to keep the block in the DB (based on its justifications).
	/// No-op for `KeepAll`/`KeepFinalized` pruning modes.
	fn prune_blocks(
		&self,
		transaction: &mut Transaction<DbHash>,
		finalized_number: NumberFor<Block>,
		current_transaction_justifications: &mut HashMap<Block::Hash, Justification>,
	) -> ClientResult<()> {
		if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning {
			// Always keep the last finalized block
			let keep = std::cmp::max(blocks_pruning, 1);
			if finalized_number >= keep.into() {
				// The single block that just left the retention window.
				let number = finalized_number.saturating_sub(keep.into());

				// Before we prune a block, check if it is pinned
				if let Some(hash) = self.blockchain.hash(number)? {
					// Check if any pruning filter wants to preserve this block.
					// We need to check both the current transaction justifications (not yet in DB)
					// and the DB itself (for justifications from previous transactions).
					if !self.pruning_filters.is_empty() {
						let justifications = match current_transaction_justifications.get(&hash) {
							Some(j) => Some(Justifications::from(j.clone())),
							None => self.blockchain.justifications(hash)?,
						};

						let should_retain = justifications
							.map(|j| self.pruning_filters.iter().any(|f| f.should_retain(&j)))
							.unwrap_or(false);

						// We can just return here, pinning can be ignored since the block will
						// remain in the DB.
						if should_retain {
							debug!(
								target: "db",
								"Preserving block #{number} ({hash}) due to keep predicate match"
							);
							return Ok(());
						}
					}

					// Copy the body into the pinned-blocks cache if someone holds a pin on it.
					self.blockchain.insert_persisted_body_if_pinned(hash)?;

					// If the block was finalized in this transaction, it will not be in the db
					// yet.
					if let Some(justification) = current_transaction_justifications.remove(&hash) {
						self.blockchain.insert_justifications_if_pinned(hash, justification);
					} else {
						self.blockchain.insert_persisted_justifications_if_pinned(hash)?;
					}
				};

				self.prune_block(transaction, BlockId::<Block>::number(number))?;
			}
		}
		Ok(())
	}
2095
2096	fn prune_displaced_branches(
2097		&self,
2098		transaction: &mut Transaction<DbHash>,
2099		displaced: &DisplacedLeavesAfterFinalization<Block>,
2100	) -> ClientResult<()> {
2101		// Discard all blocks from displaced branches
2102		for &hash in displaced.displaced_blocks.iter() {
2103			self.blockchain.insert_persisted_body_if_pinned(hash)?;
2104			self.prune_block(transaction, BlockId::<Block>::hash(hash))?;
2105		}
2106		Ok(())
2107	}
2108
	/// Remove a block's body, justifications and transaction index from the database.
	///
	/// Any indexed transaction data referenced by the block's body index gets its reference
	/// count decreased. The header and key-lookup entries are NOT removed here.
	fn prune_block(
		&self,
		transaction: &mut Transaction<DbHash>,
		id: BlockId<Block>,
	) -> ClientResult<()> {
		debug!(target: "db", "Removing block #{id}");
		utils::remove_from_db(
			transaction,
			&*self.storage.db,
			columns::KEY_LOOKUP,
			columns::BODY,
			id,
		)?;
		utils::remove_from_db(
			transaction,
			&*self.storage.db,
			columns::KEY_LOOKUP,
			columns::JUSTIFICATIONS,
			id,
		)?;
		// If the block had indexed transactions, release the referenced entries too.
		if let Some(index) =
			read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)?
		{
			utils::remove_from_db(
				transaction,
				&*self.storage.db,
				columns::KEY_LOOKUP,
				columns::BODY_INDEX,
				id,
			)?;
			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
				Ok(index) => {
					// Decrease the ref count of every indexed transaction payload.
					for ex in index {
						if let DbExtrinsic::Indexed { hash, .. } = ex {
							transaction.release(columns::TRANSACTION, hash);
						}
					}
				},
				Err(err) => {
					return Err(crate::blockchain::Error::Backend(format!(
						"Error decoding body list: {err}",
					)))
				},
			}
		}
		Ok(())
	}
2156
2157	fn empty_state(&self) -> RecordStatsState<RefTrackingState<Block>, Block> {
2158		let root = EmptyStorage::<Block>::new().0; // Empty trie
2159		let db_state = DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
2160			.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache_untrusted()))
2161			.build();
2162		let state = RefTrackingState::new(db_state, self.storage.clone(), None);
2163		RecordStatsState::new(state, None, self.state_usage.clone())
2164	}
2165}
2166
2167fn apply_state_commit(
2168	transaction: &mut Transaction<DbHash>,
2169	commit: self::state_db::CommitSet<Vec<u8>>,
2170) {
2171	for (key, val) in commit.data.inserted.into_iter() {
2172		transaction.set_from_vec(columns::STATE, &key[..], val);
2173	}
2174	for key in commit.data.deleted.into_iter() {
2175		transaction.remove(columns::STATE, &key[..]);
2176	}
2177	for (key, val) in commit.meta.inserted.into_iter() {
2178		transaction.set_from_vec(columns::STATE_META, &key[..], val);
2179	}
2180	for key in commit.meta.deleted.into_iter() {
2181		transaction.remove(columns::STATE_META, &key[..]);
2182	}
2183}
2184
2185fn apply_index_ops<Block: BlockT>(
2186	transaction: &mut Transaction<DbHash>,
2187	body: Vec<Block::Extrinsic>,
2188	ops: Vec<IndexOperation>,
2189) -> Vec<u8> {
2190	let mut extrinsic_index: Vec<DbExtrinsic<Block>> = Vec::with_capacity(body.len());
2191	let mut index_map = HashMap::new();
2192	let mut renewed_map = HashMap::new();
2193	for op in ops {
2194		match op {
2195			IndexOperation::Insert { extrinsic, hash, size } => {
2196				index_map.insert(extrinsic, (hash, size));
2197			},
2198			IndexOperation::Renew { extrinsic, hash } => {
2199				renewed_map.insert(extrinsic, DbHash::from_slice(hash.as_ref()));
2200			},
2201		}
2202	}
2203	for (index, extrinsic) in body.into_iter().enumerate() {
2204		let db_extrinsic = if let Some(hash) = renewed_map.get(&(index as u32)) {
2205			// Bump ref counter
2206			let extrinsic = extrinsic.encode();
2207			transaction.reference(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()));
2208			DbExtrinsic::Indexed { hash: *hash, header: extrinsic }
2209		} else {
2210			match index_map.get(&(index as u32)) {
2211				Some((hash, size)) => {
2212					let encoded = extrinsic.encode();
2213					if *size as usize <= encoded.len() {
2214						let offset = encoded.len() - *size as usize;
2215						transaction.store(
2216							columns::TRANSACTION,
2217							DbHash::from_slice(hash.as_ref()),
2218							encoded[offset..].to_vec(),
2219						);
2220						DbExtrinsic::Indexed {
2221							hash: DbHash::from_slice(hash.as_ref()),
2222							header: encoded[..offset].to_vec(),
2223						}
2224					} else {
2225						// Invalid indexed slice. Just store full data and don't index anything.
2226						DbExtrinsic::Full(extrinsic)
2227					}
2228				},
2229				_ => DbExtrinsic::Full(extrinsic),
2230			}
2231		};
2232		extrinsic_index.push(db_extrinsic);
2233	}
2234	debug!(
2235		target: "db",
2236		"DB transaction index: {} inserted, {} renewed, {} full",
2237		index_map.len(),
2238		renewed_map.len(),
2239		extrinsic_index.len() - index_map.len() - renewed_map.len(),
2240	);
2241	extrinsic_index.encode()
2242}
2243
2244fn apply_indexed_body<Block: BlockT>(transaction: &mut Transaction<DbHash>, body: Vec<Vec<u8>>) {
2245	for extrinsic in body {
2246		let hash = subsoil::runtime::traits::BlakeTwo256::hash(&extrinsic);
2247		transaction.store(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()), extrinsic);
2248	}
2249}
2250
2251impl<Block> crate::client_api::backend::AuxStore for Backend<Block>
2252where
2253	Block: BlockT,
2254{
2255	fn insert_aux<
2256		'a,
2257		'b: 'a,
2258		'c: 'a,
2259		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
2260		D: IntoIterator<Item = &'a &'b [u8]>,
2261	>(
2262		&self,
2263		insert: I,
2264		delete: D,
2265	) -> ClientResult<()> {
2266		let mut transaction = Transaction::new();
2267		for (k, v) in insert {
2268			transaction.set(columns::AUX, k, v);
2269		}
2270		for k in delete {
2271			transaction.remove(columns::AUX, k);
2272		}
2273		self.storage.db.commit(transaction)?;
2274		Ok(())
2275	}
2276
2277	fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
2278		Ok(self.storage.db.get(columns::AUX, key))
2279	}
2280}
2281
2282impl<Block: BlockT> crate::client_api::backend::Backend<Block> for Backend<Block> {
2283	type BlockImportOperation = BlockImportOperation<Block>;
2284	type Blockchain = BlockchainDb<Block>;
2285	type State = RecordStatsState<RefTrackingState<Block>, Block>;
2286	type OffchainStorage = offchain::LocalStorage;
2287
2288	fn begin_operation(&self) -> ClientResult<Self::BlockImportOperation> {
2289		Ok(BlockImportOperation {
2290			pending_block: None,
2291			old_state: self.empty_state(),
2292			db_updates: PrefixedMemoryDB::default(),
2293			storage_updates: Default::default(),
2294			child_storage_updates: Default::default(),
2295			offchain_storage_updates: Default::default(),
2296			aux_ops: Vec::new(),
2297			finalized_blocks: Vec::new(),
2298			set_head: None,
2299			commit_state: false,
2300			create_gap: true,
2301			reset_storage: false,
2302			index_ops: Default::default(),
2303		})
2304	}
2305
2306	fn begin_state_operation(
2307		&self,
2308		operation: &mut Self::BlockImportOperation,
2309		block: Block::Hash,
2310	) -> ClientResult<()> {
2311		if block == Default::default() {
2312			operation.old_state = self.empty_state();
2313		} else {
2314			operation.old_state = self.state_at(block, TrieCacheContext::Untrusted)?;
2315		}
2316
2317		operation.commit_state = true;
2318		Ok(())
2319	}
2320
2321	fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> {
2322		let usage = operation.old_state.usage_info();
2323		self.state_usage.merge_sm(usage);
2324
2325		if let Err(e) = self.try_commit_operation(operation) {
2326			let state_meta_db = StateMetaDb(self.storage.db.clone());
2327			self.storage
2328				.state_db
2329				.reset(state_meta_db)
2330				.map_err(crate::blockchain::Error::from_state_db)?;
2331			self.blockchain.clear_pinning_cache();
2332			Err(e)
2333		} else {
2334			self.storage.state_db.sync();
2335			Ok(())
2336		}
2337	}
2338
2339	fn finalize_block(
2340		&self,
2341		hash: Block::Hash,
2342		justification: Option<Justification>,
2343	) -> ClientResult<()> {
2344		let mut transaction = Transaction::new();
2345		let header = self.blockchain.expect_header(hash)?;
2346
2347		let mut current_transaction_justifications = HashMap::new();
2348		let m = self.finalize_block_with_transaction(
2349			&mut transaction,
2350			hash,
2351			&header,
2352			None,
2353			justification,
2354			&mut current_transaction_justifications,
2355			true,
2356		)?;
2357
2358		self.storage.db.commit(transaction)?;
2359		self.blockchain.update_meta(m);
2360		Ok(())
2361	}
2362
	/// Append an additional `justification` to an already finalized block.
	///
	/// Fails with `NotInFinalizedChain` if the block is not part of the finalized chain, and
	/// with `BadJustification` if a justification from the same consensus engine already exists.
	fn append_justification(
		&self,
		hash: Block::Hash,
		justification: Justification,
	) -> ClientResult<()> {
		let mut transaction: Transaction<DbHash> = Transaction::new();
		let header = self.blockchain.expect_header(hash)?;
		let number = *header.number();

		// Check if the block is finalized first.
		let is_descendent_of = is_descendent_of(&self.blockchain, None);
		let last_finalized = self.blockchain.last_finalized()?;

		// We can do a quick check first, before doing a proper but more expensive check
		if number > self.blockchain.info().finalized_number
			|| (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?)
		{
			return Err(ClientError::NotInFinalizedChain);
		}

		// Merge with any justifications already stored for this block; appending fails when a
		// justification from the same consensus engine is already present.
		let justifications = if let Some(mut stored_justifications) =
			self.blockchain.justifications(hash)?
		{
			if !stored_justifications.append(justification) {
				return Err(ClientError::BadJustification("Duplicate consensus engine ID".into()));
			}
			stored_justifications
		} else {
			Justifications::from(justification)
		};

		transaction.set_from_vec(
			columns::JUSTIFICATIONS,
			&utils::number_and_hash_to_lookup_key(number, hash)?,
			justifications.encode(),
		);

		self.storage.db.commit(transaction)?;

		Ok(())
	}
2404
2405	fn offchain_storage(&self) -> Option<Self::OffchainStorage> {
2406		Some(self.offchain_storage.clone())
2407	}
2408
	/// Report memory and I/O usage statistics for this backend.
	///
	/// I/O stats are currently stubbed out (see the TODO below); state-access counters come
	/// from the internal `StateUsageStats` tracker and are reset on each call.
	fn usage_info(&self) -> Option<UsageInfo> {
		let (io_stats, state_stats) = self.io_stats.take_or_else(|| {
			(
				// TODO: implement DB stats and cache size retrieval
				kvdb::IoStats::empty(),
				self.state_usage.take(),
			)
		});
		// Database cache size is not tracked yet, see the TODO above.
		let database_cache = MemorySize::from_bytes(0);
		let state_cache = MemorySize::from_bytes(
			self.shared_trie_cache.as_ref().map_or(0, |c| c.used_memory_size()),
		);

		Some(UsageInfo {
			memory: MemoryInfo { state_cache, database_cache },
			io: IoInfo {
				transactions: io_stats.transactions,
				bytes_read: io_stats.bytes_read,
				bytes_written: io_stats.bytes_written,
				writes: io_stats.writes,
				reads: io_stats.reads,
				average_transaction_size: io_stats.avg_transaction_size() as u64,
				state_reads: state_stats.reads.ops,
				state_writes: state_stats.writes.ops,
				state_writes_cache: state_stats.overlay_writes.ops,
				state_reads_cache: state_stats.cache_reads.ops,
				state_writes_nodes: state_stats.nodes_writes.ops,
			},
		})
	}
2439
	/// Revert up to `n` blocks from the current best chain, returning the number of blocks
	/// actually reverted and the set of finalized block hashes that were reverted.
	///
	/// Unless `revert_finalized` is set, at most `best_number - finalized_number` blocks are
	/// reverted. Reverting starts from the highest known leaf (which may be above the best
	/// block) and walks back one block at a time, undoing one state-db commit per block.
	/// Finally, leaves above the reverted-to block are removed as well.
	fn revert(
		&self,
		n: NumberFor<Block>,
		revert_finalized: bool,
	) -> ClientResult<(NumberFor<Block>, HashSet<Block::Hash>)> {
		let mut reverted_finalized = HashSet::new();

		let info = self.blockchain.info();

		// The highest leaf may sit above the best block (e.g. non-best forks); reverting must
		// start from there so the leaf set stays consistent.
		let highest_leaf = self
			.blockchain
			.leaves
			.read()
			.highest_leaf()
			.and_then(|(n, h)| h.last().map(|h| (n, *h)));

		let best_number = info.best_number;
		let best_hash = info.best_hash;

		let finalized = info.finalized_number;

		// Cap `n` so finalized blocks are not touched, unless explicitly requested.
		let revertible = best_number - finalized;
		let n = if !revert_finalized && revertible < n { revertible } else { n };

		// Extend the budget by the distance from best to the highest leaf, and start there.
		let (n, mut number_to_revert, mut hash_to_revert) = match highest_leaf {
			Some((l_n, l_h)) => (n + (l_n - best_number), l_n, l_h),
			None => (n, best_number, best_hash),
		};

		let mut revert_blocks = || -> ClientResult<NumberFor<Block>> {
			for c in 0..n.saturated_into::<u64>() {
				// Stop early at genesis, at a missing parent state, or when the state db has
				// nothing left to revert.
				if number_to_revert.is_zero() {
					return Ok(c.saturated_into::<NumberFor<Block>>());
				}
				let mut transaction = Transaction::new();
				let removed = self.blockchain.header(hash_to_revert)?.ok_or_else(|| {
					crate::blockchain::Error::UnknownBlock(format!(
						"Error reverting to {hash_to_revert}. Block header not found.",
					))
				})?;
				let removed_hash = hash_to_revert;

				let prev_number = number_to_revert.saturating_sub(One::one());
				let prev_hash =
					if prev_number == best_number { best_hash } else { *removed.parent_hash() };

				if !self.have_state_at(prev_hash, prev_number) {
					return Ok(c.saturated_into::<NumberFor<Block>>());
				}

				match self.storage.state_db.revert_one() {
					Some(commit) => {
						apply_state_commit(&mut transaction, commit);

						number_to_revert = prev_number;
						hash_to_revert = prev_hash;

						// If we reverted below the finalized height, the finalized pointers in
						// META must be moved back as well.
						let update_finalized = number_to_revert < finalized;

						let key = utils::number_and_hash_to_lookup_key(
							number_to_revert,
							&hash_to_revert,
						)?;
						if update_finalized {
							transaction.set_from_vec(
								columns::META,
								meta_keys::FINALIZED_BLOCK,
								key.clone(),
							);

							reverted_finalized.insert(removed_hash);
							if let Some((hash, _)) = self.blockchain.info().finalized_state {
								if hash == hash_to_revert {
									// The finalized-state anchor pointed at the reverted block:
									// move it to the parent if its state exists, else drop it.
									if !number_to_revert.is_zero()
										&& self.have_state_at(prev_hash, prev_number)
									{
										let lookup_key = utils::number_and_hash_to_lookup_key(
											prev_number,
											prev_hash,
										)?;
										transaction.set_from_vec(
											columns::META,
											meta_keys::FINALIZED_STATE,
											lookup_key,
										);
									} else {
										transaction
											.remove(columns::META, meta_keys::FINALIZED_STATE);
									}
								}
							}
						}

						transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key);
						transaction.remove(columns::KEY_LOOKUP, removed_hash.as_ref());
						children::remove_children(
							&mut transaction,
							columns::META,
							meta_keys::CHILDREN_PREFIX,
							hash_to_revert,
						);
						self.prune_block(&mut transaction, BlockId::Hash(removed_hash))?;
						remove_from_db::<Block>(
							&mut transaction,
							&*self.storage.db,
							columns::KEY_LOOKUP,
							columns::HEADER,
							BlockId::Hash(removed_hash),
						)?;

						self.storage.db.commit(transaction)?;

						// Clean the cache
						self.blockchain.remove_header_metadata(removed_hash);

						let is_best = number_to_revert < best_number;

						self.blockchain.update_meta(MetaUpdate {
							hash: hash_to_revert,
							number: number_to_revert,
							is_best,
							is_finalized: update_finalized,
							with_state: false,
						});
					},
					None => return Ok(c.saturated_into::<NumberFor<Block>>()),
				}
			}

			Ok(n)
		};

		let reverted = revert_blocks()?;

		// Remove all remaining leaves above the block we reverted to, and persist the new
		// leaf set.
		let revert_leaves = || -> ClientResult<()> {
			let mut transaction = Transaction::new();
			let mut leaves = self.blockchain.leaves.write();

			leaves.revert(hash_to_revert, number_to_revert).into_iter().try_for_each(
				|(h, _)| {
					self.blockchain.remove_header_metadata(h);
					transaction.remove(columns::KEY_LOOKUP, h.as_ref());

					self.prune_block(&mut transaction, BlockId::Hash(h))?;
					remove_from_db::<Block>(
						&mut transaction,
						&*self.storage.db,
						columns::KEY_LOOKUP,
						columns::HEADER,
						BlockId::Hash(h),
					)?;

					Ok::<_, ClientError>(())
				},
			)?;
			leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
			self.storage.db.commit(transaction)?;

			Ok(())
		};

		revert_leaves()?;

		Ok((reverted, reverted_finalized))
	}
2605
	/// Remove a single leaf block (state, key-lookup entry, body and leaf-set entry).
	///
	/// Fails when `hash` is the current best block, is not a leaf, or its state has already
	/// been discarded. If committing the DB transaction fails, the in-memory leaf-set change
	/// is rolled back.
	fn remove_leaf_block(&self, hash: Block::Hash) -> ClientResult<()> {
		let best_hash = self.blockchain.info().best_hash;

		if best_hash == hash {
			return Err(crate::blockchain::Error::Backend(format!(
				"Can't remove best block {hash:?}"
			)));
		}

		let hdr = self.blockchain.header_metadata(hash)?;
		if !self.have_state_at(hash, hdr.number) {
			return Err(crate::blockchain::Error::UnknownBlock(format!(
				"State already discarded for {hash:?}",
			)));
		}

		// Hold the leaf-set write lock for the whole operation so nobody observes a
		// half-removed leaf.
		let mut leaves = self.blockchain.leaves.write();
		if !leaves.contains(hdr.number, hash) {
			return Err(crate::blockchain::Error::Backend(format!(
				"Can't remove non-leaf block {hash:?}",
			)));
		}

		let mut transaction = Transaction::new();
		if let Some(commit) = self.storage.state_db.remove(&hash) {
			apply_state_commit(&mut transaction, commit);
		}
		transaction.remove(columns::KEY_LOOKUP, hash.as_ref());

		// Update the parent's children list; if this was the only child, the parent becomes a
		// leaf again.
		let children: Vec<_> = self
			.blockchain()
			.children(hdr.parent)?
			.into_iter()
			.filter(|child_hash| *child_hash != hash)
			.collect();
		let parent_leaf = if children.is_empty() {
			children::remove_children(
				&mut transaction,
				columns::META,
				meta_keys::CHILDREN_PREFIX,
				hdr.parent,
			);
			Some(hdr.parent)
		} else {
			children::write_children(
				&mut transaction,
				columns::META,
				meta_keys::CHILDREN_PREFIX,
				hdr.parent,
				children,
			);
			None
		};

		let remove_outcome = leaves.remove(hash, hdr.number, parent_leaf);
		leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
		if let Err(e) = self.storage.db.commit(transaction) {
			// Commit failed: undo the in-memory leaf-set mutation to stay consistent with disk.
			if let Some(outcome) = remove_outcome {
				leaves.undo().undo_remove(outcome);
			}
			return Err(e.into());
		}
		self.blockchain().remove_header_metadata(hash);
		Ok(())
	}
2671
	/// Access the underlying blockchain database.
	fn blockchain(&self) -> &BlockchainDb<Block> {
		&self.blockchain
	}
2675
	/// Return a state handle at the given block `hash`.
	///
	/// The genesis state is served from the in-memory genesis storage when available (before
	/// the genesis block is committed). Otherwise, the block's state is pinned in the state db
	/// so it is not pruned while the handle is alive; fails with `UnknownBlock` if the state
	/// was already discarded.
	fn state_at(
		&self,
		hash: Block::Hash,
		trie_cache_context: TrieCacheContext,
	) -> ClientResult<Self::State> {
		// Special case for genesis: serve from the in-memory genesis storage if it is still set.
		if hash == self.blockchain.meta.read().genesis_hash {
			if let Some(genesis_state) = &*self.genesis_state.read() {
				let root = genesis_state.root;
				let db_state =
					DbStateBuilder::<HashingFor<Block>>::new(genesis_state.clone(), root)
						.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| {
							if matches!(trie_cache_context, TrieCacheContext::Trusted) {
								c.local_cache_trusted()
							} else {
								c.local_cache_untrusted()
							}
						}))
						.build();

				let state = RefTrackingState::new(db_state, self.storage.clone(), None);
				return Ok(RecordStatsState::new(state, None, self.state_usage.clone()));
			}
		}

		match self.blockchain.header_metadata(hash) {
			Ok(ref hdr) => {
				// Hint for the state db: whether the state root node is actually on disk.
				let hint = || {
					self::state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref())
						.unwrap_or(None)
						.is_some()
				};

				// Pin the state so it cannot be pruned while this handle exists; the pin is
				// released via `RefTrackingState`'s tracking of `Some(hash)`.
				if let Ok(()) =
					self.storage.state_db.pin(&hash, hdr.number.saturated_into::<u64>(), hint)
				{
					let root = hdr.state_root;
					let db_state =
						DbStateBuilder::<HashingFor<Block>>::new(self.storage.clone(), root)
							.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| {
								if matches!(trie_cache_context, TrieCacheContext::Trusted) {
									c.local_cache_trusted()
								} else {
									c.local_cache_untrusted()
								}
							}))
							.build();
					let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash));
					Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone()))
				} else {
					Err(crate::blockchain::Error::UnknownBlock(format!(
						"State already discarded for {hash:?}",
					)))
				}
			},
			Err(e) => Err(e),
		}
	}
2733
2734	fn have_state_at(&self, hash: Block::Hash, number: NumberFor<Block>) -> bool {
2735		if self.is_archive {
2736			match self.blockchain.header_metadata(hash) {
2737				Ok(header) => subsoil::state_machine::Storage::get(
2738					self.storage.as_ref(),
2739					&header.state_root,
2740					(&[], None),
2741				)
2742				.unwrap_or(None)
2743				.is_some(),
2744				_ => false,
2745			}
2746		} else {
2747			match self.storage.state_db.is_pruned(&hash, number.saturated_into::<u64>()) {
2748				IsPruned::Pruned => false,
2749				IsPruned::NotPruned => true,
2750				IsPruned::MaybePruned => match self.blockchain.header_metadata(hash) {
2751					Ok(header) => subsoil::state_machine::Storage::get(
2752						self.storage.as_ref(),
2753						&header.state_root,
2754						(&[], None),
2755					)
2756					.unwrap_or(None)
2757					.is_some(),
2758					_ => false,
2759				},
2760			}
2761		}
2762	}
2763
	/// Lock used to serialize block import against other backend operations.
	fn get_import_lock(&self) -> &RwLock<()> {
		&self.import_lock
	}
2767
2768	fn requires_full_sync(&self) -> bool {
2769		matches!(
2770			self.storage.state_db.pruning_mode(),
2771			PruningMode::ArchiveAll | PruningMode::ArchiveCanonical
2772		)
2773	}
2774
	/// Pin the block with the given `hash` so its state and (when blocks are pruned) its body
	/// and justifications are kept available until `unpin_block` is called.
	///
	/// Fails with `UnknownBlock` if the block is not found or its state was already discarded.
	fn pin_block(&self, hash: <Block as BlockT>::Hash) -> crate::blockchain::Result<()> {
		// Hint for the state db: whether the block's state root node is actually on disk.
		let hint = || {
			let header_metadata = self.blockchain.header_metadata(hash);
			header_metadata
				.map(|hdr| {
					self::state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref())
						.unwrap_or(None)
						.is_some()
				})
				.unwrap_or(false)
		};

		if let Some(number) = self.blockchain.number(hash)? {
			self.storage.state_db.pin(&hash, number.saturated_into::<u64>(), hint).map_err(
				|_| {
					crate::blockchain::Error::UnknownBlock(format!(
						"Unable to pin: state already discarded for `{hash:?}`",
					))
				},
			)?;
		} else {
			return Err(ClientError::UnknownBlock(format!(
				"Can not pin block with hash `{hash:?}`. Block not found.",
			)));
		}

		if self.blocks_pruning != BlocksPruning::KeepAll {
			// Only increase reference count for this hash. Value is loaded once we prune.
			self.blockchain.bump_ref(hash);
		}
		Ok(())
	}
2807
2808	fn unpin_block(&self, hash: <Block as BlockT>::Hash) {
2809		self.storage.state_db.unpin(&hash);
2810
2811		if self.blocks_pruning != BlocksPruning::KeepAll {
2812			self.blockchain.unpin(hash);
2813		}
2814	}
2815}
2816
// `Backend` keeps all of its data locally, so it qualifies as a `LocalBackend`.
impl<Block: BlockT> crate::client_api::backend::LocalBackend<Block> for Backend<Block> {}
2818
2819#[cfg(test)]
2820pub(crate) mod tests {
2821	use super::*;
2822	use super::{columns, utils::number_and_hash_to_lookup_key};
2823	use crate::blockchain::{lowest_common_ancestor, tree_route};
2824	use crate::client_api::{
2825		backend::{Backend as BTrait, BlockImportOperation as Op},
2826		blockchain::Backend as BLBTrait,
2827	};
2828	use hash_db::{HashDB, EMPTY_PREFIX};
2829	use subsoil::core::H256;
2830	use subsoil::runtime::{
2831		testing::{Block as RawBlock, Header, MockCallU64, TestXt},
2832		traits::{BlakeTwo256, Hash},
2833		ConsensusEngineId, StateVersion,
2834	};
2835
	// Two distinct consensus engine ids, used to exercise multi-justification code paths.
	const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0";
	const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1";

	// Test extrinsic and block types shared by all tests in this module.
	type UncheckedXt = TestXt<MockCallU64, ()>;
	pub(crate) type Block = RawBlock<UncheckedXt>;
2841
2842	pub fn insert_header(
2843		backend: &Backend<Block>,
2844		number: u64,
2845		parent_hash: H256,
2846		changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
2847		extrinsics_root: H256,
2848	) -> H256 {
2849		insert_block(backend, number, parent_hash, changes, extrinsics_root, Vec::new(), None)
2850			.unwrap()
2851	}
2852
	/// Insert a block on top of `parent_hash` as the new best block and return its hash.
	///
	/// A small piece of dummy storage data is written so the block's state can be found in the
	/// state column afterwards. `_changes` is currently unused.
	pub fn insert_block(
		backend: &Backend<Block>,
		number: u64,
		parent_hash: H256,
		_changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
		extrinsics_root: H256,
		body: Vec<UncheckedXt>,
		transaction_index: Option<Vec<IndexOperation>>,
	) -> Result<H256, crate::blockchain::Error> {
		use subsoil::runtime::testing::Digest;

		let digest = Digest::default();
		let mut header =
			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };

		// Genesis builds on the default (pre-genesis) state; every other block builds on its
		// parent's state.
		let block_hash = if number == 0 { Default::default() } else { parent_hash };
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, block_hash).unwrap();
		if let Some(index) = transaction_index {
			op.update_transaction_index(index).unwrap();
		}

		// Insert some fake data to ensure that the block can be found in the state column.
		let (root, overlay) = op.old_state.storage_root(
			vec![(block_hash.as_ref(), Some(block_hash.as_ref()))].into_iter(),
			StateVersion::V1,
		);
		op.update_db_storage(overlay).unwrap();
		header.state_root = root.into();

		op.set_block_data(header.clone(), Some(body), None, None, NewBlockState::Best, true)
			.unwrap();

		backend.commit_operation(op)?;

		Ok(header.hash())
	}
2890
2891	pub fn insert_disconnected_header(
2892		backend: &Backend<Block>,
2893		number: u64,
2894		parent_hash: H256,
2895		extrinsics_root: H256,
2896		best: bool,
2897	) -> H256 {
2898		use subsoil::runtime::testing::Digest;
2899
2900		let digest = Digest::default();
2901		let header =
2902			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2903
2904		let mut op = backend.begin_operation().unwrap();
2905
2906		op.set_block_data(
2907			header.clone(),
2908			Some(vec![]),
2909			None,
2910			None,
2911			if best { NewBlockState::Best } else { NewBlockState::Normal },
2912			true,
2913		)
2914		.unwrap();
2915
2916		backend.commit_operation(op).unwrap();
2917
2918		header.hash()
2919	}
2920
2921	pub fn insert_header_no_head(
2922		backend: &Backend<Block>,
2923		number: u64,
2924		parent_hash: H256,
2925		extrinsics_root: H256,
2926	) -> H256 {
2927		use subsoil::runtime::testing::Digest;
2928
2929		let digest = Digest::default();
2930		let mut header =
2931			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
2932		let mut op = backend.begin_operation().unwrap();
2933
2934		let root = backend
2935			.state_at(parent_hash, TrieCacheContext::Untrusted)
2936			.unwrap_or_else(|_| {
2937				if parent_hash == Default::default() {
2938					backend.empty_state()
2939				} else {
2940					panic!("Unknown block: {parent_hash:?}")
2941				}
2942			})
2943			.storage_root(
2944				vec![(parent_hash.as_ref(), Some(parent_hash.as_ref()))].into_iter(),
2945				StateVersion::V1,
2946			)
2947			.0;
2948		header.state_root = root.into();
2949
2950		op.set_block_data(header.clone(), None, None, None, NewBlockState::Normal, true)
2951			.unwrap();
2952		backend.commit_operation(op).unwrap();
2953
2954		header.hash()
2955	}
2956
	#[test]
	fn block_hash_inserted_correctly() {
		// Build a chain of ten blocks in a throw-away backend, keeping only the
		// underlying database handle alive afterwards.
		let backing = {
			let db = Backend::<Block>::new_test(1, 0);
			for i in 0..10 {
				assert!(db.blockchain().hash(i).unwrap().is_none());

				{
					// Block 0 is genesis; later blocks chain onto the previous hash.
					let hash = if i == 0 {
						Default::default()
					} else {
						db.blockchain.hash(i - 1).unwrap().unwrap()
					};

					let mut op = db.begin_operation().unwrap();
					db.begin_state_operation(&mut op, hash).unwrap();
					let header = Header {
						number: i,
						parent_hash: hash,
						state_root: Default::default(),
						digest: Default::default(),
						extrinsics_root: Default::default(),
					};

					op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
						.unwrap();
					db.commit_operation(op).unwrap();
				}

				assert!(db.blockchain().hash(i).unwrap().is_some())
			}
			db.storage.db.clone()
		};

		// Re-open a backend over the same database and verify the number-to-hash
		// mappings survived the restart.
		let backend = Backend::<Block>::new(
			DatabaseSettings {
				trie_cache_maximum_size: Some(16 * 1024 * 1024),
				state_pruning: Some(PruningMode::blocks_pruning(1)),
				source: DatabaseSource::Custom { db: backing, require_create_flag: false },
				blocks_pruning: BlocksPruning::KeepFinalized,
				pruning_filters: Default::default(),
				metrics_registry: None,
			},
			0,
		)
		.unwrap();
		assert_eq!(backend.blockchain().info().best_number, 9);
		for i in 0..10 {
			assert!(backend.blockchain().hash(i).unwrap().is_some())
		}
	}
3008
3009	#[test]
3010	fn set_state_data() {
3011		set_state_data_inner(StateVersion::V0);
3012		set_state_data_inner(StateVersion::V1);
3013	}
	// Shared body for `set_state_data`: commits a genesis block with initial
	// storage, then a child block that deletes one key and adds another, and
	// checks reads at both resulting states.
	fn set_state_data_inner(state_version: StateVersion) {
		let db = Backend::<Block>::new_test(2, 0);
		let hash = {
			let mut op = db.begin_operation().unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(vec![1, 3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9])];

			// The header must carry the root produced by the initial storage,
			// computed before the storage vec is consumed below.
			header.state_root = op
				.old_state
				.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.reset_storage(
				Storage {
					top: storage.into_iter().collect(),
					children_default: Default::default(),
				},
				state_version,
			)
			.unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			db.commit_operation(op).unwrap();

			// Both initial keys are readable; an unknown key is absent.
			let state = db.state_at(hash, TrieCacheContext::Untrusted).unwrap();

			assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6]));
			assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
			assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None);

			hash
		};

		{
			let mut op = db.begin_operation().unwrap();
			db.begin_state_operation(&mut op, hash).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			// Delta: delete [1, 3, 5], insert [5, 5, 5].
			let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];

			let (root, overlay) = op.old_state.storage_root(
				storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
				state_version,
			);
			op.update_db_storage(overlay).unwrap();
			header.state_root = root.into();

			op.update_storage(storage, Vec::new()).unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			db.commit_operation(op).unwrap();

			// The child state reflects the delta while untouched keys persist.
			let state = db.state_at(header.hash(), TrieCacheContext::Untrusted).unwrap();

			assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None);
			assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
			assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6]));
		}
	}
3090
	#[test]
	fn delete_only_when_negative_rc() {
		// Inserts the same trie node in blocks 0 and 1, removes it in blocks 1
		// and 2, and checks that the node only disappears from the database once
		// enough blocks have been finalized for pruning (keep = 1) to discard
		// the last state still referencing it.
		subsoil::tracing::try_init_simple();
		let state_version = StateVersion::default();
		let key;
		let backend = Backend::<Block>::new_test(1, 0);

		let hash = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, Default::default()).unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			header.state_root =
				op.old_state.storage_root(std::iter::empty(), state_version).0.into();
			let hash = header.hash();

			op.reset_storage(
				Storage { top: Default::default(), children_default: Default::default() },
				state_version,
			)
			.unwrap();

			// First insertion of the node.
			key = op.db_updates.insert(EMPTY_PREFIX, b"hello");
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			// The node must be present in the STATE column after the commit.
			assert_eq!(
				backend
					.storage
					.db
					.get(
						columns::STATE,
						&subsoil::trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX)
					)
					.unwrap(),
				&b"hello"[..]
			);
			hash
		};

		let hashof1 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hash).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			// Re-insert and remove the node in the same block: it must survive.
			op.db_updates.insert(EMPTY_PREFIX, b"hello");
			op.db_updates.remove(&key, EMPTY_PREFIX);
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			assert_eq!(
				backend
					.storage
					.db
					.get(
						columns::STATE,
						&subsoil::trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX)
					)
					.unwrap(),
				&b"hello"[..]
			);
			hash
		};

		let hashof2 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof1).unwrap();
			let mut header = Header {
				number: 2,
				parent_hash: hashof1,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			// Second removal; earlier states may still reference the node, so it
			// must not be deleted from the database yet.
			op.db_updates.remove(&key, EMPTY_PREFIX);
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			assert!(backend
				.storage
				.db
				.get(
					columns::STATE,
					&subsoil::trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX)
				)
				.is_some());
			hash
		};

		// Two more empty blocks so pruning (keep = 1) can discard the states
		// that still referenced the node.
		let hashof3 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof2).unwrap();
			let mut header = Header {
				number: 3,
				parent_hash: hashof2,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			hash
		};

		let hashof4 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hashof3).unwrap();
			let mut header = Header {
				number: 4,
				parent_hash: hashof3,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage: Vec<(_, _)> = vec![];

			header.state_root = op
				.old_state
				.storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();
			// By now no live state references the node anymore.
			assert!(backend
				.storage
				.db
				.get(
					columns::STATE,
					&subsoil::trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX)
				)
				.is_none());
			hash
		};

		// Finalization drives canonicalization/pruning; afterwards the node is gone.
		backend.finalize_block(hashof1, None).unwrap();
		backend.finalize_block(hashof2, None).unwrap();
		backend.finalize_block(hashof3, None).unwrap();
		backend.finalize_block(hashof4, None).unwrap();
		assert!(backend
			.storage
			.db
			.get(columns::STATE, &subsoil::trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX))
			.is_none());
	}
3287
	#[test]
	fn tree_route_works() {
		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();
		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());

		// fork from genesis: 3 prong.
		let a1 = insert_header(&backend, 1, block0, None, Default::default());
		let a2 = insert_header(&backend, 2, a1, None, Default::default());
		let a3 = insert_header(&backend, 3, a2, None, Default::default());

		// fork from genesis: 2 prong.
		let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32]));
		let b2 = insert_header(&backend, 2, b1, None, Default::default());

		{
			// Route from a block to itself: nothing retracted, nothing enacted.
			let tree_route = tree_route(blockchain, a1, a1).unwrap();

			assert_eq!(tree_route.common_block().hash, a1);
			assert!(tree_route.retracted().is_empty());
			assert!(tree_route.enacted().is_empty());
		}

		{
			// Route across forks: retract back to genesis, then enact the other fork.
			let tree_route = tree_route(blockchain, a3, b2).unwrap();

			assert_eq!(tree_route.common_block().hash, block0);
			assert_eq!(
				tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![a3, a2, a1]
			);
			assert_eq!(
				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![b1, b2]
			);
		}

		{
			// Route to a descendant: nothing retracted.
			let tree_route = tree_route(blockchain, a1, a3).unwrap();

			assert_eq!(tree_route.common_block().hash, a1);
			assert!(tree_route.retracted().is_empty());
			assert_eq!(
				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![a2, a3]
			);
		}

		{
			// Route to an ancestor: nothing enacted.
			let tree_route = tree_route(blockchain, a3, a1).unwrap();

			assert_eq!(tree_route.common_block().hash, a1);
			assert_eq!(
				tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(),
				vec![a3, a2]
			);
			assert!(tree_route.enacted().is_empty());
		}

		{
			// Self-route again at a different height.
			let tree_route = tree_route(blockchain, a2, a2).unwrap();

			assert_eq!(tree_route.common_block().hash, a2);
			assert!(tree_route.retracted().is_empty());
			assert!(tree_route.enacted().is_empty());
		}
	}
3355
3356	#[test]
3357	fn tree_route_child() {
3358		let backend = Backend::<Block>::new_test(1000, 100);
3359		let blockchain = backend.blockchain();
3360
3361		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3362		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3363
3364		{
3365			let tree_route = tree_route(blockchain, block0, block1).unwrap();
3366
3367			assert_eq!(tree_route.common_block().hash, block0);
3368			assert!(tree_route.retracted().is_empty());
3369			assert_eq!(
3370				tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(),
3371				vec![block1]
3372			);
3373		}
3374	}
3375
3376	#[test]
3377	fn lowest_common_ancestor_works() {
3378		let backend = Backend::<Block>::new_test(1000, 100);
3379		let blockchain = backend.blockchain();
3380		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3381
3382		// fork from genesis: 3 prong.
3383		let a1 = insert_header(&backend, 1, block0, None, Default::default());
3384		let a2 = insert_header(&backend, 2, a1, None, Default::default());
3385		let a3 = insert_header(&backend, 3, a2, None, Default::default());
3386
3387		// fork from genesis: 2 prong.
3388		let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32]));
3389		let b2 = insert_header(&backend, 2, b1, None, Default::default());
3390
3391		{
3392			let lca = lowest_common_ancestor(blockchain, a3, b2).unwrap();
3393
3394			assert_eq!(lca.hash, block0);
3395			assert_eq!(lca.number, 0);
3396		}
3397
3398		{
3399			let lca = lowest_common_ancestor(blockchain, a1, a3).unwrap();
3400
3401			assert_eq!(lca.hash, a1);
3402			assert_eq!(lca.number, 1);
3403		}
3404
3405		{
3406			let lca = lowest_common_ancestor(blockchain, a3, a1).unwrap();
3407
3408			assert_eq!(lca.hash, a1);
3409			assert_eq!(lca.number, 1);
3410		}
3411
3412		{
3413			let lca = lowest_common_ancestor(blockchain, a2, a3).unwrap();
3414
3415			assert_eq!(lca.hash, a2);
3416			assert_eq!(lca.number, 2);
3417		}
3418
3419		{
3420			let lca = lowest_common_ancestor(blockchain, a2, a1).unwrap();
3421
3422			assert_eq!(lca.hash, a1);
3423			assert_eq!(lca.number, 1);
3424		}
3425
3426		{
3427			let lca = lowest_common_ancestor(blockchain, a2, a2).unwrap();
3428
3429			assert_eq!(lca.hash, a2);
3430			assert_eq!(lca.number, 2);
3431		}
3432	}
3433
	#[test]
	fn displaced_leaves_after_finalizing_works_with_disconnect() {
		// In this test we will create a situation that can typically happen after warp sync.
		// The situation looks like this:
		// g -> <unimported> -> a3 -> a4
		// Basically there is a gap of unimported blocks at some point in the chain.
		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();
		let genesis_number = 0;
		let genesis_hash =
			insert_header(&backend, genesis_number, Default::default(), None, Default::default());

		let a3_number = 3;
		let a3_hash = insert_disconnected_header(
			&backend,
			a3_number,
			H256::from([200; 32]),
			H256::from([1; 32]),
			true,
		);

		let a4_number = 4;
		let a4_hash =
			insert_disconnected_header(&backend, a4_number, a3_hash, H256::from([2; 32]), true);
		{
			// Finalizing a3 displaces the genesis leaf, but the gap prevents any
			// block from being scheduled for removal.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([200; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		{
			// Same situation one block higher.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a4_hash, a4_number, a3_hash)
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(genesis_number, genesis_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Import block a1 which has the genesis block as parent.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		let a1_number = 1;
		let a1_hash = insert_disconnected_header(
			&backend,
			a1_number,
			genesis_hash,
			H256::from([123; 32]),
			false,
		);
		{
			// a1 cannot be connected to the finalized chain, so nothing is displaced.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Import block b1 which has the genesis block as parent.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		//  \-> b1
		let b1_number = 1;
		let b1_hash = insert_disconnected_header(
			&backend,
			b1_number,
			genesis_hash,
			H256::from([124; 32]),
			false,
		);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash, b1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// If branch of b blocks is higher in number than a branch, we
		// should still not prune disconnected leafs.
		// g -> a1 -> <unimported> -> a3(f) -> a4
		//  \-> b1 -> b2 ----------> b3 ----> b4 -> b5
		let b2_number = 2;
		let b2_hash =
			insert_disconnected_header(&backend, b2_number, b1_hash, H256::from([40; 32]), false);
		let b3_number = 3;
		let b3_hash =
			insert_disconnected_header(&backend, b3_number, b2_hash, H256::from([41; 32]), false);
		let b4_number = 4;
		let b4_hash =
			insert_disconnected_header(&backend, b4_number, b3_hash, H256::from([42; 32]), false);
		let b5_number = 5;
		let b5_hash =
			insert_disconnected_header(&backend, b5_number, b4_hash, H256::from([43; 32]), false);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, H256::from([2; 32]))
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// Even though there is a disconnect, displacement should still detect
		// branches above the block gap.
		//                              /-> c4
		// g -> a1 -> <unimported> -> a3 -> a4(f)
		//  \-> b1 -> b2 ----------> b3 -> b4 -> b5
		let c4_number = 4;
		let c4_hash =
			insert_disconnected_header(&backend, c4_number, a3_hash, H256::from([44; 32]), false);
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(a4_hash, a4_number, a3_hash)
				.unwrap();
			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, c4_hash, a1_hash]);
			assert_eq!(displaced.displaced_leaves, vec![(c4_number, c4_hash)]);
			assert_eq!(displaced.displaced_blocks, vec![c4_hash]);
		}
	}
3557
	#[test]
	fn disconnected_blocks_do_not_become_leaves_and_warp_sync_scenario() {
		// Simulate a realistic case:
		//
		// 1. Import genesis (block #0) normally — becomes a leaf.
		// 2. Import warp sync proof blocks at #5, #10, #15 without leaf registration. Their parents
		//    are NOT in the DB. They must NOT appear as leaves.
		// 3. Import block #20 as Final. Its parent (#19) is not in the DB. Being Final, it updates
		//    finalized number to 20.
		// 4. Import blocks #1..#19 with Normal state (gap sync). Since last_finalized_num is now 20
		//    and each block number < 20, the leaf condition (number > last_finalized_num ||
		//    last_finalized_num.is_zero()) is FALSE — they must NOT become leaves.
		// 5. Assert throughout and verify displaced_leaves_after_finalizing works cleanly with no
		//    disconnected proof blocks in the displaced list.

		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();

		// Low-level insertion helper: commits a header with full control over the
		// block state and whether it is registered in the leaf set.
		let insert_block_raw = |number: u64,
		                        parent_hash: H256,
		                        ext_root: H256,
		                        state: NewBlockState,
		                        register_as_leaf: bool|
		 -> H256 {
			use subsoil::runtime::testing::Digest;
			let digest = Digest::default();
			let header = Header {
				number,
				parent_hash,
				state_root: Default::default(),
				digest,
				extrinsics_root: ext_root,
			};
			let mut op = backend.begin_operation().unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, state, register_as_leaf)
				.unwrap();
			backend.commit_operation(op).unwrap();
			header.hash()
		};

		// --- Step 1: import genesis ---
		let genesis_hash = insert_header(&backend, 0, Default::default(), None, Default::default());
		assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]);

		// --- Step 2: import warp sync proof blocks without leaf registration ---
		// These simulate authority-set-change blocks from the warp sync proof.
		// Their parents are NOT in the DB.
		let _proof5_hash = insert_block_raw(
			5,
			H256::from([5; 32]),
			H256::from([50; 32]),
			NewBlockState::Normal,
			false,
		);
		let _proof10_hash = insert_block_raw(
			10,
			H256::from([10; 32]),
			H256::from([100; 32]),
			NewBlockState::Normal,
			false,
		);
		let _proof15_hash = insert_block_raw(
			15,
			H256::from([15; 32]),
			H256::from([150; 32]),
			NewBlockState::Normal,
			false,
		);

		// Leaves must still only contain genesis.
		assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]);

		// The disconnected blocks should still be retrievable from the DB.
		assert!(blockchain.header(_proof5_hash).unwrap().is_some());
		assert!(blockchain.header(_proof10_hash).unwrap().is_some());
		assert!(blockchain.header(_proof15_hash).unwrap().is_some());

		// --- Step 3: import warp sync target block #20 as Final ---
		// Parent (#19) is not in the DB. Use the same low-level approach but with
		// NewBlockState::Final. Being Final, it will be set as best + finalized.
		let block20_hash = insert_block_raw(
			20,
			H256::from([19; 32]),
			H256::from([200; 32]),
			NewBlockState::Final,
			true,
		);

		// Block #20 should now be a leaf (it's best and finalized).
		let leaves = blockchain.leaves().unwrap();
		assert!(leaves.contains(&block20_hash));
		// Verify finalized number was updated to 20.
		assert_eq!(blockchain.info().finalized_number, 20);
		assert_eq!(blockchain.info().finalized_hash, block20_hash);
		// Disconnected proof blocks must still not be leaves.
		assert!(!leaves.contains(&_proof5_hash));
		assert!(!leaves.contains(&_proof10_hash));
		assert!(!leaves.contains(&_proof15_hash));

		// --- Step 4: import gap sync blocks #1..#19 with Normal state ---
		// Since last_finalized_num is 20, each block with number < 20 should NOT
		// become a leaf (the condition `number > last_finalized_num` is false).
		// Build the chain: genesis -> #1 -> #2 -> ... -> #19.
		let mut prev_hash = genesis_hash;
		let mut gap_hashes = Vec::new();
		for n in 1..=19 {
			let h = insert_disconnected_header(&backend, n, prev_hash, Default::default(), false);
			gap_hashes.push(h);
			prev_hash = h;
		}

		// Verify gap sync blocks did NOT create new leaves.
		let leaves = blockchain.leaves().unwrap();
		for (i, gap_hash) in gap_hashes.iter().enumerate() {
			assert!(
				!leaves.contains(gap_hash),
				"Gap sync block #{} should not be a leaf, but it is",
				i + 1,
			);
		}
		// Block #20 should still be a leaf.
		assert!(leaves.contains(&block20_hash));
		// Disconnected proof blocks must still not be leaves.
		assert!(!leaves.contains(&_proof5_hash));
		assert!(!leaves.contains(&_proof10_hash));
		assert!(!leaves.contains(&_proof15_hash));

		// --- Step 5: verify displaced_leaves_after_finalizing works cleanly ---
		// Call it for block #20 to verify no disconnected proof blocks appear
		// in the displaced list and it completes without errors.
		{
			let displaced = blockchain
				.displaced_leaves_after_finalizing(
					block20_hash,
					20,
					H256::from([19; 32]), // parent hash of block #20
				)
				.unwrap();
			// Disconnected proof blocks were never leaves, so they must not
			// appear in displaced_leaves.
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof5_hash),);
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof10_hash),);
			assert!(!displaced.displaced_leaves.iter().any(|(_, h)| *h == _proof15_hash),);
			// None of the gap sync blocks should be displaced leaves either
			// (they were never added as leaves).
			for gap_hash in &gap_hashes {
				assert!(!displaced.displaced_leaves.iter().any(|(_, h)| h == gap_hash),);
			}
		}
	}
3708
	#[test]
	fn displaced_leaves_after_finalizing_works() {
		let backend = Backend::<Block>::new_test(1000, 100);
		let blockchain = backend.blockchain();
		let genesis_number = 0;
		let genesis_hash =
			insert_header(&backend, genesis_number, Default::default(), None, Default::default());

		// fork from genesis: 3 prong.
		// block 0 -> a1 -> a2 -> a3
		//        \
		//         -> b1 -> b2 -> c1 -> c2
		//              \
		//               -> d1 -> d2
		let a1_number = 1;
		let a1_hash = insert_header(&backend, a1_number, genesis_hash, None, Default::default());
		let a2_number = 2;
		let a2_hash = insert_header(&backend, a2_number, a1_hash, None, Default::default());
		let a3_number = 3;
		let a3_hash = insert_header(&backend, a3_number, a2_hash, None, Default::default());

		{
			// With a single chain, finalizing genesis displaces nothing.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(genesis_hash, genesis_number, Default::default())
				.unwrap();
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}
		{
			// Finalizing along the only existing chain also displaces nothing.
			let displaced_a1 = blockchain
				.displaced_leaves_after_finalizing(a1_hash, a1_number, genesis_hash)
				.unwrap();
			assert_eq!(displaced_a1.displaced_leaves, vec![]);
			assert_eq!(displaced_a1.displaced_blocks, vec![]);

			let displaced_a2 = blockchain
				.displaced_leaves_after_finalizing(a2_hash, a2_number, a1_hash)
				.unwrap();
			assert_eq!(displaced_a2.displaced_leaves, vec![]);
			assert_eq!(displaced_a2.displaced_blocks, vec![]);

			let displaced_a3 = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, a2_hash)
				.unwrap();
			assert_eq!(displaced_a3.displaced_leaves, vec![]);
			assert_eq!(displaced_a3.displaced_blocks, vec![]);
		}
		{
			// Finalized block is above leaves and not imported yet.
			// We will not be able to make a connection,
			// nothing can be marked as displaced.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(H256::from([57; 32]), 10, H256::from([56; 32]))
				.unwrap();
			assert_eq!(displaced.displaced_leaves, vec![]);
			assert_eq!(displaced.displaced_blocks, vec![]);
		}

		// fork from genesis: 2 prong.
		let b1_number = 1;
		let b1_hash = insert_header(&backend, b1_number, genesis_hash, None, H256::from([1; 32]));
		let b2_number = 2;
		let b2_hash = insert_header(&backend, b2_number, b1_hash, None, Default::default());

		// fork from b2.
		let c1_number = 3;
		let c1_hash = insert_header(&backend, c1_number, b2_hash, None, H256::from([2; 32]));
		let c2_number = 4;
		let c2_hash = insert_header(&backend, c2_number, c1_hash, None, Default::default());

		// fork from b1.
		let d1_number = 2;
		let d1_hash = insert_header(&backend, d1_number, b1_hash, None, H256::from([3; 32]));
		let d2_number = 3;
		let d2_hash = insert_header(&backend, d2_number, d1_hash, None, Default::default());

		{
			// Finalizing any a-chain block displaces the entire b/c/d subtree,
			// and the result is the same at every a-chain height.
			let displaced_a1 = blockchain
				.displaced_leaves_after_finalizing(a1_hash, a1_number, genesis_hash)
				.unwrap();
			assert_eq!(
				displaced_a1.displaced_leaves,
				vec![(c2_number, c2_hash), (d2_number, d2_hash)]
			);
			let mut displaced_blocks = vec![b1_hash, b2_hash, c1_hash, c2_hash, d1_hash, d2_hash];
			displaced_blocks.sort();
			assert_eq!(displaced_a1.displaced_blocks, displaced_blocks);

			let displaced_a2 = blockchain
				.displaced_leaves_after_finalizing(a2_hash, a2_number, a1_hash)
				.unwrap();
			assert_eq!(displaced_a1.displaced_leaves, displaced_a2.displaced_leaves);
			assert_eq!(displaced_a1.displaced_blocks, displaced_a2.displaced_blocks);

			let displaced_a3 = blockchain
				.displaced_leaves_after_finalizing(a3_hash, a3_number, a2_hash)
				.unwrap();
			assert_eq!(displaced_a1.displaced_leaves, displaced_a3.displaced_leaves);
			assert_eq!(displaced_a1.displaced_blocks, displaced_a3.displaced_blocks);
		}
		{
			// Finalizing b1 displaces only the a chain (c and d descend from b1).
			let displaced = blockchain
				.displaced_leaves_after_finalizing(b1_hash, b1_number, genesis_hash)
				.unwrap();
			assert_eq!(displaced.displaced_leaves, vec![(a3_number, a3_hash)]);
			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash];
			displaced_blocks.sort();
			assert_eq!(displaced.displaced_blocks, displaced_blocks);
		}
		{
			// Finalizing b2 additionally displaces the d fork branching at b1.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(b2_hash, b2_number, b1_hash)
				.unwrap();
			assert_eq!(
				displaced.displaced_leaves,
				vec![(a3_number, a3_hash), (d2_number, d2_hash)]
			);
			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash];
			displaced_blocks.sort();
			assert_eq!(displaced.displaced_blocks, displaced_blocks);
		}
		{
			// Finalizing c2 displaces the same set as b2, since c extends b2.
			let displaced = blockchain
				.displaced_leaves_after_finalizing(c2_hash, c2_number, c1_hash)
				.unwrap();
			assert_eq!(
				displaced.displaced_leaves,
				vec![(a3_number, a3_hash), (d2_number, d2_hash)]
			);
			let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash];
			displaced_blocks.sort();
			assert_eq!(displaced.displaced_blocks, displaced_blocks);
		}
	}
3843
	#[test]
	fn test_tree_route_regression() {
		// NOTE: this is a test for a regression introduced in #3665, the result
		// of tree_route would be erroneously computed, since it was taking into
		// account the `ancestor` in `CachedHeaderMetadata` for the comparison.
		// in this test we simulate the same behavior with the side-effect
		// triggering the issue being eviction of a previously fetched record
		// from the cache, therefore this test is dependent on the LRU cache
		// size for header metadata, which is currently set to 5000 elements.
		let backend = Backend::<Block>::new_test(10000, 10000);
		let blockchain = backend.blockchain();

		let genesis = insert_header(&backend, 0, Default::default(), None, Default::default());

		// Build a single linear chain: genesis -> #100 -> ... -> #7000.
		let block100 = (1..=100).fold(genesis, |parent, n| {
			insert_header(&backend, n, parent, None, Default::default())
		});

		let block7000 = (101..=7000).fold(block100, |parent, n| {
			insert_header(&backend, n, parent, None, Default::default())
		});

		// This will cause the ancestor of `block100` to be set to `genesis` as a side-effect.
		lowest_common_ancestor(blockchain, genesis, block100).unwrap();

		// While traversing the tree we will have to do 6900 calls to
		// `header_metadata`, which will make sure we will exhaust our cache
		// which only takes 5000 elements. In particular, the `CachedHeaderMetadata` struct for
		// block #100 will be evicted and will get a new value (with ancestor set to its parent).
		let tree_route = tree_route(blockchain, block100, block7000).unwrap();

		// Both endpoints lie on the same branch, so nothing may be retracted.
		assert!(tree_route.retracted().is_empty());
	}
3877
3878	// Runtime-backed integration tests for the backend live in the `soil-test`
3879	// crate to keep `soil-client` free of the `soil-test-node-runtime-client`
3880	// dev-dependency.
3881
3882	#[test]
3883	fn test_leaves_pruned_on_finality() {
3884		//   / 1b - 2b - 3b
3885		// 0 - 1a - 2a
3886		//   \ 1c
3887		let backend: Backend<Block> = Backend::new_test(10, 10);
3888		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3889
3890		let block1_a = insert_header(&backend, 1, block0, None, Default::default());
3891		let block1_b = insert_header(&backend, 1, block0, None, [1; 32].into());
3892		let block1_c = insert_header(&backend, 1, block0, None, [2; 32].into());
3893
3894		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block1_a, block1_b, block1_c]);
3895
3896		let block2_a = insert_header(&backend, 2, block1_a, None, Default::default());
3897		let block2_b = insert_header(&backend, 2, block1_b, None, Default::default());
3898
3899		let block3_b = insert_header(&backend, 3, block2_b, None, [3; 32].into());
3900
3901		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block3_b, block2_a, block1_c]);
3902
3903		backend.finalize_block(block1_a, None).unwrap();
3904		backend.finalize_block(block2_a, None).unwrap();
3905
3906		// All leaves are pruned that are known to not belong to canonical branch
3907		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
3908	}
3909
3910	#[test]
3911	fn test_aux() {
3912		let backend: Backend<Block> = Backend::new_test(0, 0);
3913		assert!(backend.get_aux(b"test").unwrap().is_none());
3914		backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap();
3915		assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]);
3916		backend.insert_aux(&[], &[&b"test"[..]]).unwrap();
3917		assert!(backend.get_aux(b"test").unwrap().is_none());
3918	}
3919
3920	#[test]
3921	fn test_finalize_block_with_justification() {
3922		use crate::client_api::blockchain::Backend as BlockChainBackend;
3923
3924		let backend = Backend::<Block>::new_test(10, 10);
3925
3926		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3927		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3928
3929		let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3]));
3930		backend.finalize_block(block1, justification.clone()).unwrap();
3931
3932		assert_eq!(
3933			backend.blockchain().justifications(block1).unwrap(),
3934			justification.map(Justifications::from),
3935		);
3936	}
3937
3938	#[test]
3939	fn test_append_justification_to_finalized_block() {
3940		use crate::client_api::blockchain::Backend as BlockChainBackend;
3941
3942		let backend = Backend::<Block>::new_test(10, 10);
3943
3944		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3945		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3946
3947		let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]);
3948		backend.finalize_block(block1, Some(just0.clone().into())).unwrap();
3949
3950		let just1 = (CONS1_ENGINE_ID, vec![4, 5]);
3951		backend.append_justification(block1, just1.clone()).unwrap();
3952
3953		let just2 = (CONS1_ENGINE_ID, vec![6, 7]);
3954		assert!(matches!(
3955			backend.append_justification(block1, just2),
3956			Err(ClientError::BadJustification(_))
3957		));
3958
3959		let justifications = {
3960			let mut just = Justifications::from(just0);
3961			just.append(just1);
3962			just
3963		};
3964		assert_eq!(backend.blockchain().justifications(block1).unwrap(), Some(justifications),);
3965	}
3966
3967	#[test]
3968	fn test_finalize_multiple_blocks_in_single_op() {
3969		let backend = Backend::<Block>::new_test(10, 10);
3970
3971		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
3972		let block1 = insert_header(&backend, 1, block0, None, Default::default());
3973		let block2 = insert_header(&backend, 2, block1, None, Default::default());
3974		let block3 = insert_header(&backend, 3, block2, None, Default::default());
3975		let block4 = insert_header(&backend, 4, block3, None, Default::default());
3976		{
3977			let mut op = backend.begin_operation().unwrap();
3978			backend.begin_state_operation(&mut op, block0).unwrap();
3979			op.mark_finalized(block1, None).unwrap();
3980			op.mark_finalized(block2, None).unwrap();
3981			backend.commit_operation(op).unwrap();
3982		}
3983		{
3984			let mut op = backend.begin_operation().unwrap();
3985			backend.begin_state_operation(&mut op, block2).unwrap();
3986			op.mark_finalized(block3, None).unwrap();
3987			op.mark_finalized(block4, None).unwrap();
3988			backend.commit_operation(op).unwrap();
3989		}
3990	}
3991
	#[test]
	fn storage_hash_is_cached_correctly() {
		// Regression test: the storage hash reported by `state_at` must change
		// once a block that overwrites the entry becomes best.
		let state_version = StateVersion::default();
		let backend = Backend::<Block>::new_test(10, 10);

		// Import block 0 with `test -> test` in top-level storage.
		let hash0 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, Default::default()).unwrap();
			let mut header = Header {
				number: 0,
				parent_hash: Default::default(),
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(b"test".to_vec(), b"test".to_vec())];

			// Compute the state root over the initial storage before hashing
			// the header.
			header.state_root = op
				.old_state
				.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version)
				.0
				.into();
			let hash = header.hash();

			op.reset_storage(
				Storage {
					top: storage.into_iter().collect(),
					children_default: Default::default(),
				},
				state_version,
			)
			.unwrap();
			op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			hash
		};

		// Storage hash of `test` as seen at block 0.
		let block0_hash = backend
			.state_at(hash0, TrieCacheContext::Untrusted)
			.unwrap()
			.storage_hash(&b"test"[..])
			.unwrap();

		// Import block 1, which overwrites `test` with `test2`, initially only
		// as a `Normal` (non-best) block.
		let hash1 = {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, hash0).unwrap();
			let mut header = Header {
				number: 1,
				parent_hash: hash0,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			};

			let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))];

			let (root, overlay) = op.old_state.storage_root(
				storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
				state_version,
			);
			op.update_db_storage(overlay).unwrap();
			header.state_root = root.into();
			let hash = header.hash();

			op.update_storage(storage, Vec::new()).unwrap();
			op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Normal, true)
				.unwrap();

			backend.commit_operation(op).unwrap();

			hash
		};

		{
			// Now promote block 1 to best by re-importing its header.
			let header = backend.blockchain().header(hash1).unwrap().unwrap();
			let mut op = backend.begin_operation().unwrap();
			op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
			backend.commit_operation(op).unwrap();
		}

		// Storage hash of `test` as seen at block 1.
		let block1_hash = backend
			.state_at(hash1, TrieCacheContext::Untrusted)
			.unwrap()
			.storage_hash(&b"test"[..])
			.unwrap();

		// The stored value changed between the blocks, so the reported hashes
		// must differ — i.e. no stale cached hash is returned.
		assert_ne!(block0_hash, block1_hash);
	}
4084
4085	#[test]
4086	fn test_finalize_non_sequential() {
4087		let backend = Backend::<Block>::new_test(10, 10);
4088
4089		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4090		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4091		let block2 = insert_header(&backend, 2, block1, None, Default::default());
4092		{
4093			let mut op = backend.begin_operation().unwrap();
4094			backend.begin_state_operation(&mut op, block0).unwrap();
4095			op.mark_finalized(block2, None).unwrap();
4096			backend.commit_operation(op).unwrap_err();
4097		}
4098	}
4099
	#[test]
	fn prune_blocks_on_finalize() {
		// Block bodies must be pruned (or kept) according to the configured
		// pruning mode once blocks are finalized.
		let pruning_modes =
			vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll];

		for pruning_mode in pruning_modes {
			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 0);
			// Build a chain of 5 blocks, each carrying a single extrinsic.
			let mut blocks = Vec::new();
			let mut prev_hash = Default::default();
			for i in 0..5 {
				let hash = insert_block(
					&backend,
					i,
					prev_hash,
					None,
					Default::default(),
					vec![UncheckedXt::new_transaction(i.into(), ())],
					None,
				)
				.unwrap();
				blocks.push(hash);
				prev_hash = hash;
			}

			{
				// Finalize blocks 1..=4 within a single commit.
				let mut op = backend.begin_operation().unwrap();
				backend.begin_state_operation(&mut op, blocks[4]).unwrap();
				for i in 1..5 {
					op.mark_finalized(blocks[i], None).unwrap();
				}
				backend.commit_operation(op).unwrap();
			}
			let bc = backend.blockchain();

			if matches!(pruning_mode, BlocksPruning::Some(_)) {
				// `Some(2)`: only the bodies of the last two blocks survive.
				assert_eq!(None, bc.body(blocks[0]).unwrap());
				assert_eq!(None, bc.body(blocks[1]).unwrap());
				assert_eq!(None, bc.body(blocks[2]).unwrap());
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
					bc.body(blocks[3]).unwrap()
				);
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
					bc.body(blocks[4]).unwrap()
				);
			} else {
				// `KeepFinalized` / `KeepAll`: every body is still available.
				for i in 0..5 {
					assert_eq!(
						Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]),
						bc.body(blocks[i]).unwrap()
					);
				}
			}
		}
	}
4156
	#[test]
	fn prune_blocks_on_finalize_with_fork() {
		subsoil::tracing::try_init_simple();

		// Pruning on finalization must also cover fork blocks displaced from
		// the canonical chain, under each of the supported pruning modes.
		let pruning_modes =
			vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll];

		for pruning in pruning_modes {
			let backend = Backend::<Block>::new_test_with_tx_storage(pruning, 10);
			// Build the canonical chain 0..=4, one extrinsic per block.
			let mut blocks = Vec::new();
			let mut prev_hash = Default::default();
			for i in 0..5 {
				let hash = insert_block(
					&backend,
					i,
					prev_hash,
					None,
					Default::default(),
					vec![UncheckedXt::new_transaction(i.into(), ())],
					None,
				)
				.unwrap();
				blocks.push(hash);
				prev_hash = hash;
			}

			// insert a fork at block 2
			let fork_hash_root = insert_block(
				&backend,
				2,
				blocks[1],
				None,
				H256::random(),
				vec![UncheckedXt::new_transaction(2.into(), ())],
				None,
			)
			.unwrap();
			// Extend the fork by one more block.
			insert_block(
				&backend,
				3,
				fork_hash_root,
				None,
				H256::random(),
				vec![
					UncheckedXt::new_transaction(3.into(), ()),
					UncheckedXt::new_transaction(11.into(), ()),
				],
				None,
			)
			.unwrap();
			// Make the canonical chain tip the best block.
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_head(blocks[4]).unwrap();
			backend.commit_operation(op).unwrap();

			let bc = backend.blockchain();
			// Before finalization the fork body is still available.
			assert_eq!(
				Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
				bc.body(fork_hash_root).unwrap()
			);

			// Finalize the canonical chain block by block.
			for i in 1..5 {
				let mut op = backend.begin_operation().unwrap();
				backend.begin_state_operation(&mut op, blocks[4]).unwrap();
				op.mark_finalized(blocks[i], None).unwrap();
				backend.commit_operation(op).unwrap();
			}

			if matches!(pruning, BlocksPruning::Some(_)) {
				// `Some(2)`: only the two most recent canonical bodies remain.
				assert_eq!(None, bc.body(blocks[0]).unwrap());
				assert_eq!(None, bc.body(blocks[1]).unwrap());
				assert_eq!(None, bc.body(blocks[2]).unwrap());

				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
					bc.body(blocks[3]).unwrap()
				);
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
					bc.body(blocks[4]).unwrap()
				);
			} else {
				// `KeepFinalized` / `KeepAll`: all canonical bodies are kept.
				for i in 0..5 {
					assert_eq!(
						Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]),
						bc.body(blocks[i]).unwrap()
					);
				}
			}

			if matches!(pruning, BlocksPruning::KeepAll) {
				// Only `KeepAll` also retains the displaced fork body.
				assert_eq!(
					Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
					bc.body(fork_hash_root).unwrap()
				);
			} else {
				assert_eq!(None, bc.body(fork_hash_root).unwrap());
			}

			// The canonical number -> hash mapping stays intact in every mode.
			assert_eq!(bc.info().best_number, 4);
			for i in 0..5 {
				assert!(bc.hash(i).unwrap().is_some());
			}
		}
	}
4262
	#[test]
	fn prune_blocks_on_finalize_and_reorg() {
		// 	0 - 1b
		// 	\ - 1a - 2a - 3a
		// 	     \ - 2b
		// Finalizing the `a` branch after a reorg must prune the bodies of the
		// displaced fork blocks 1b and 2b while keeping the canonical ones.

		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(10), 10);

		// Helper: insert a block carrying a single extrinsic derived from `val`.
		let make_block = |index, parent, val: u64| {
			insert_block(
				&backend,
				index,
				parent,
				None,
				H256::random(),
				vec![UncheckedXt::new_transaction(val.into(), ())],
				None,
			)
			.unwrap()
		};

		let block_0 = make_block(0, Default::default(), 0x00);
		let block_1a = make_block(1, block_0, 0x1a);
		let block_1b = make_block(1, block_0, 0x1b);
		let block_2a = make_block(2, block_1a, 0x2a);
		let block_2b = make_block(2, block_1a, 0x2b);
		let block_3a = make_block(3, block_2a, 0x3a);

		// Make sure 1b is head
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, block_0).unwrap();
		op.mark_head(block_1b).unwrap();
		backend.commit_operation(op).unwrap();

		// Finalize 3a
		// This reorgs the head from 1b to 3a and finalizes 1a, 2a and 3a in
		// one operation.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, block_0).unwrap();
		op.mark_head(block_3a).unwrap();
		op.mark_finalized(block_1a, None).unwrap();
		op.mark_finalized(block_2a, None).unwrap();
		op.mark_finalized(block_3a, None).unwrap();
		backend.commit_operation(op).unwrap();

		let bc = backend.blockchain();
		// Displaced fork bodies are gone ...
		assert_eq!(None, bc.body(block_1b).unwrap());
		assert_eq!(None, bc.body(block_2b).unwrap());
		// ... while the canonical chain bodies are retained.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x00.into(), ())]),
			bc.body(block_0).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x1a.into(), ())]),
			bc.body(block_1a).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x2a.into(), ())]),
			bc.body(block_2a).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0x3a.into(), ())]),
			bc.body(block_3a).unwrap()
		);
	}
4326
	#[test]
	fn indexed_data_block_body() {
		// Indexed transaction payloads must be retrievable while the block
		// body exists and be cleared once the body is pruned.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);

		let x0 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x1 = UncheckedXt::new_transaction(1.into(), ()).encode();
		// The indexed payload skips the first encoded byte; size accounts for
		// that by subtracting one.
		let x0_hash = <HashingFor<Block> as subsoil::core::Hasher>::hash(&x0[1..]);
		let x1_hash = <HashingFor<Block> as subsoil::core::Hasher>::hash(&x1[1..]);
		let index = vec![
			IndexOperation::Insert {
				extrinsic: 0,
				hash: x0_hash.as_ref().to_vec(),
				size: (x0.len() - 1) as u32,
			},
			IndexOperation::Insert {
				extrinsic: 1,
				hash: x1_hash.as_ref().to_vec(),
				size: (x1.len() - 1) as u32,
			},
		];
		let hash = insert_block(
			&backend,
			0,
			Default::default(),
			None,
			Default::default(),
			vec![
				UncheckedXt::new_transaction(0.into(), ()),
				UncheckedXt::new_transaction(1.into(), ()),
			],
			Some(index),
		)
		.unwrap();
		let bc = backend.blockchain();
		// Both indexed payloads are retrievable by their hash.
		assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[1..]);
		assert_eq!(bc.indexed_transaction(x1_hash).unwrap().unwrap(), &x1[1..]);

		let hashof0 = bc.info().genesis_hash;
		// Push one more blocks and make sure block is pruned and transaction index is cleared.
		let block1 =
			insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap();
		backend.finalize_block(block1, None).unwrap();
		assert_eq!(bc.body(hashof0).unwrap(), None);
		assert_eq!(bc.indexed_transaction(x0_hash).unwrap(), None);
		assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None);
	}
4373
	#[test]
	fn index_invalid_size() {
		// An index entry whose declared size exceeds the actual extrinsic
		// length must not be stored, while a correctly sized one is.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);

		let x0 = UncheckedXt::new_transaction(0.into(), ()).encode();
		let x1 = UncheckedXt::new_transaction(1.into(), ()).encode();

		let x0_hash = <HashingFor<Block> as subsoil::core::Hasher>::hash(&x0[..]);
		let x1_hash = <HashingFor<Block> as subsoil::core::Hasher>::hash(&x1[..]);
		let index = vec![
			// Size matches the encoded extrinsic exactly: valid entry.
			IndexOperation::Insert {
				extrinsic: 0,
				hash: x0_hash.as_ref().to_vec(),
				size: (x0.len()) as u32,
			},
			// Size is one byte too large: invalid entry.
			IndexOperation::Insert {
				extrinsic: 1,
				hash: x1_hash.as_ref().to_vec(),
				size: (x1.len() + 1) as u32,
			},
		];
		insert_block(
			&backend,
			0,
			Default::default(),
			None,
			Default::default(),
			vec![
				UncheckedXt::new_transaction(0.into(), ()),
				UncheckedXt::new_transaction(1.into(), ()),
			],
			Some(index),
		)
		.unwrap();
		let bc = backend.blockchain();
		// Only the correctly sized entry is retrievable.
		assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[..]);
		assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None);
	}
4412
	#[test]
	fn renew_transaction_storage() {
		// An indexed transaction stays alive while later blocks keep renewing
		// it; once renewals stop, it is pruned together with old bodies.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();
		let x1 = UncheckedXt::new_transaction(0.into(), ()).encode();
		// The indexed payload skips the first encoded byte.
		let x1_hash = <HashingFor<Block> as subsoil::core::Hasher>::hash(&x1[1..]);
		for i in 0..10 {
			let mut index = Vec::new();
			if i == 0 {
				// Initial insertion of the indexed payload at block 0.
				index.push(IndexOperation::Insert {
					extrinsic: 0,
					hash: x1_hash.as_ref().to_vec(),
					size: (x1.len() - 1) as u32,
				});
			} else if i < 5 {
				// keep renewing 1st
				index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() });
			} // else stop renewing
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				Some(index),
			)
			.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		// Finalize one block at a time: the entry survives until the last
		// renewal (block 4) falls out of the 2-block pruning window.
		for i in 1..10 {
			let mut op = backend.begin_operation().unwrap();
			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
			op.mark_finalized(blocks[i], None).unwrap();
			backend.commit_operation(op).unwrap();
			let bc = backend.blockchain();
			if i < 6 {
				assert!(bc.indexed_transaction(x1_hash).unwrap().is_some());
			} else {
				assert!(bc.indexed_transaction(x1_hash).unwrap().is_none());
			}
		}
	}
4459
	#[test]
	fn remove_leaf_block_works() {
		// `remove_leaf_block` must refuse to remove the best block, and for
		// other leaves it must keep the leaf set and the parent->children
		// index consistent after each removal.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
		// Base chain: blocks[0] (genesis) and blocks[1].
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();
		for i in 0..2 {
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);
			prev_hash = hash;
		}

		// Two competing children of blocks[1]: blocks[2] and blocks[3].
		for i in 0..2 {
			let hash = insert_block(
				&backend,
				2,
				blocks[1],
				None,
				subsoil::core::H256::random(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);
		}

		// insert a fork at block 1, which becomes best block
		let best_hash = insert_block(
			&backend,
			1,
			blocks[0],
			None,
			subsoil::core::H256::random(),
			vec![UncheckedXt::new_transaction(42.into(), ())],
			None,
		)
		.unwrap();

		// The best block cannot be removed.
		assert_eq!(backend.blockchain().info().best_hash, best_hash);
		assert!(backend.remove_leaf_block(best_hash).is_err());

		assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]);
		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]);

		// Remove blocks[3]: its state, header, leaf entry and child link go.
		assert!(backend.have_state_at(blocks[3], 2));
		assert!(backend.blockchain().header(blocks[3]).unwrap().is_some());
		backend.remove_leaf_block(blocks[3]).unwrap();
		assert!(!backend.have_state_at(blocks[3], 2));
		assert!(backend.blockchain().header(blocks[3]).unwrap().is_none());
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]);
		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]);

		// Remove blocks[2]: its parent blocks[1] becomes a leaf again.
		assert!(backend.have_state_at(blocks[2], 2));
		assert!(backend.blockchain().header(blocks[2]).unwrap().is_some());
		backend.remove_leaf_block(blocks[2]).unwrap();
		assert!(!backend.have_state_at(blocks[2], 2));
		assert!(backend.blockchain().header(blocks[2]).unwrap().is_none());
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]);
		assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]);

		// Remove blocks[1]: only the fork head remains as a leaf.
		assert!(backend.have_state_at(blocks[1], 1));
		assert!(backend.blockchain().header(blocks[1]).unwrap().is_some());
		backend.remove_leaf_block(blocks[1]).unwrap();
		assert!(!backend.have_state_at(blocks[1], 1));
		assert!(backend.blockchain().header(blocks[1]).unwrap().is_none());
		assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]);
		assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]);
	}
4536
	#[test]
	fn test_import_existing_block_as_new_head() {
		// Re-importing an existing block as the new best head is rejected when
		// the implied reorg reaches behind the canonicalization delay (3 here).
		let backend: Backend<Block> = Backend::new_test(10, 3);
		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
		let block1 = insert_header(&backend, 1, block0, None, Default::default());
		let block2 = insert_header(&backend, 2, block1, None, Default::default());
		let block3 = insert_header(&backend, 3, block2, None, Default::default());
		let block4 = insert_header(&backend, 4, block3, None, Default::default());
		let block5 = insert_header(&backend, 5, block4, None, Default::default());
		assert_eq!(backend.blockchain().info().best_hash, block5);

		// Insert 1 as best again. This should fail because canonicalization_delay == 3 and best ==
		// 5
		let header = Header {
			number: 1,
			parent_hash: block0,
			state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
			digest: Default::default(),
			extrinsics_root: Default::default(),
		};
		let mut op = backend.begin_operation().unwrap();
		op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
		assert!(matches!(
			backend.commit_operation(op),
			Err(crate::blockchain::Error::SetHeadTooOld)
		));

		// Insert 2 as best again.
		// Block 2 is still within the canonicalization window, so this succeeds.
		let header = backend.blockchain().header(block2).unwrap().unwrap();
		let mut op = backend.begin_operation().unwrap();
		op.set_block_data(header, None, None, None, NewBlockState::Best, true).unwrap();
		backend.commit_operation(op).unwrap();
		assert_eq!(backend.blockchain().info().best_hash, block2);
	}
4571
4572	#[test]
4573	fn test_import_existing_block_as_final() {
4574		let backend: Backend<Block> = Backend::new_test(10, 10);
4575		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4576		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4577		let _block2 = insert_header(&backend, 2, block1, None, Default::default());
4578		// Genesis is auto finalized, the rest are not.
4579		assert_eq!(backend.blockchain().info().finalized_hash, block0);
4580
4581		// Insert 1 as final again.
4582		let header = backend.blockchain().header(block1).unwrap().unwrap();
4583
4584		let mut op = backend.begin_operation().unwrap();
4585		op.set_block_data(header, None, None, None, NewBlockState::Final, true).unwrap();
4586		backend.commit_operation(op).unwrap();
4587
4588		assert_eq!(backend.blockchain().info().finalized_hash, block1);
4589	}
4590
4591	#[test]
4592	fn test_import_existing_state_fails() {
4593		let backend: Backend<Block> = Backend::new_test(10, 10);
4594		let genesis =
4595			insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
4596				.unwrap();
4597
4598		insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();
4599		let err = insert_block(&backend, 1, genesis, None, Default::default(), vec![], None)
4600			.err()
4601			.unwrap();
4602		match err {
4603			crate::blockchain::Error::StateDatabase(m) if m == "Block already exists" => (),
4604			e @ _ => panic!("Unexpected error {:?}", e),
4605		}
4606	}
4607
4608	#[test]
4609	fn test_leaves_not_created_for_ancient_blocks() {
4610		let backend: Backend<Block> = Backend::new_test(10, 10);
4611		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4612
4613		let block1_a = insert_header(&backend, 1, block0, None, Default::default());
4614		let block2_a = insert_header(&backend, 2, block1_a, None, Default::default());
4615		backend.finalize_block(block1_a, None).unwrap();
4616		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
4617
4618		// Insert a fork prior to finalization point. Leave should not be created.
4619		insert_header_no_head(&backend, 1, block0, [1; 32].into());
4620		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]);
4621	}
4622
4623	#[test]
4624	fn revert_non_best_blocks() {
4625		let backend = Backend::<Block>::new_test(10, 10);
4626
4627		let genesis =
4628			insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
4629				.unwrap();
4630
4631		let block1 =
4632			insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();
4633
4634		let block2 =
4635			insert_block(&backend, 2, block1, None, Default::default(), vec![], None).unwrap();
4636
4637		let block3 = {
4638			let mut op = backend.begin_operation().unwrap();
4639			backend.begin_state_operation(&mut op, block1).unwrap();
4640			let header = Header {
4641				number: 3,
4642				parent_hash: block2,
4643				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4644				digest: Default::default(),
4645				extrinsics_root: Default::default(),
4646			};
4647
4648			op.set_block_data(
4649				header.clone(),
4650				Some(Vec::new()),
4651				None,
4652				None,
4653				NewBlockState::Normal,
4654				true,
4655			)
4656			.unwrap();
4657
4658			backend.commit_operation(op).unwrap();
4659
4660			header.hash()
4661		};
4662
4663		let block4 = {
4664			let mut op = backend.begin_operation().unwrap();
4665			backend.begin_state_operation(&mut op, block2).unwrap();
4666			let header = Header {
4667				number: 4,
4668				parent_hash: block3,
4669				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4670				digest: Default::default(),
4671				extrinsics_root: Default::default(),
4672			};
4673
4674			op.set_block_data(
4675				header.clone(),
4676				Some(Vec::new()),
4677				None,
4678				None,
4679				NewBlockState::Normal,
4680				true,
4681			)
4682			.unwrap();
4683
4684			backend.commit_operation(op).unwrap();
4685
4686			header.hash()
4687		};
4688
4689		let block3_fork = {
4690			let mut op = backend.begin_operation().unwrap();
4691			backend.begin_state_operation(&mut op, block2).unwrap();
4692			let header = Header {
4693				number: 3,
4694				parent_hash: block2,
4695				state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
4696				digest: Default::default(),
4697				extrinsics_root: H256::from_low_u64_le(42),
4698			};
4699
4700			op.set_block_data(
4701				header.clone(),
4702				Some(Vec::new()),
4703				None,
4704				None,
4705				NewBlockState::Normal,
4706				true,
4707			)
4708			.unwrap();
4709
4710			backend.commit_operation(op).unwrap();
4711
4712			header.hash()
4713		};
4714
4715		assert!(backend.have_state_at(block1, 1));
4716		assert!(backend.have_state_at(block2, 2));
4717		assert!(backend.have_state_at(block3, 3));
4718		assert!(backend.have_state_at(block4, 4));
4719		assert!(backend.have_state_at(block3_fork, 3));
4720
4721		assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]);
4722		assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0);
4723
4724		assert_eq!(3, backend.revert(1, false).unwrap().0);
4725
4726		assert!(backend.have_state_at(block1, 1));
4727
4728		let ensure_pruned = |hash, number: u32| {
4729			assert_eq!(
4730				backend.blockchain.status(hash).unwrap(),
4731				crate::client_api::blockchain::BlockStatus::Unknown
4732			);
4733			assert!(
4734				backend
4735					.blockchain
4736					.db
4737					.get(columns::BODY, &number_and_hash_to_lookup_key(number, hash).unwrap())
4738					.is_none(),
4739				"{number}"
4740			);
4741			assert!(
4742				backend
4743					.blockchain
4744					.db
4745					.get(columns::HEADER, &number_and_hash_to_lookup_key(number, hash).unwrap())
4746					.is_none(),
4747				"{number}"
4748			);
4749		};
4750
4751		ensure_pruned(block2, 2);
4752		ensure_pruned(block3, 3);
4753		ensure_pruned(block4, 4);
4754		ensure_pruned(block3_fork, 3);
4755
4756		assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]);
4757		assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0);
4758	}
4759
4760	#[test]
4761	fn revert_finalized_blocks() {
4762		let pruning_modes = [BlocksPruning::Some(10), BlocksPruning::KeepAll];
4763
4764		// we will create a chain with 11 blocks, finalize block #8 and then
4765		// attempt to revert 5 blocks.
4766		for pruning_mode in pruning_modes {
4767			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);
4768
4769			let mut parent = Default::default();
4770			for i in 0..=10 {
4771				parent = insert_block(&backend, i, parent, None, Default::default(), vec![], None)
4772					.unwrap();
4773			}
4774
4775			assert_eq!(backend.blockchain().info().best_number, 10);
4776
4777			let block8 = backend.blockchain().hash(8).unwrap().unwrap();
4778			backend.finalize_block(block8, None).unwrap();
4779			backend.revert(5, true).unwrap();
4780
4781			match pruning_mode {
4782				// we can only revert to blocks for which we have state, if pruning is enabled
4783				// then the last state available will be that of the latest finalized block
4784				BlocksPruning::Some(_) => {
4785					assert_eq!(backend.blockchain().info().finalized_number, 8)
4786				},
4787				// otherwise if we're not doing state pruning we can revert past finalized blocks
4788				_ => assert_eq!(backend.blockchain().info().finalized_number, 5),
4789			}
4790		}
4791	}
4792
4793	#[test]
4794	fn test_no_duplicated_leaves_allowed() {
4795		let backend: Backend<Block> = Backend::new_test(10, 10);
4796		let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
4797		let block1 = insert_header(&backend, 1, block0, None, Default::default());
4798		// Add block 2 not as the best block
4799		let block2 = insert_header_no_head(&backend, 2, block1, Default::default());
4800		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
4801		assert_eq!(backend.blockchain().info().best_hash, block1);
4802
4803		// Add block 2 as the best block
4804		let block2 = insert_header(&backend, 2, block1, None, Default::default());
4805		assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]);
4806		assert_eq!(backend.blockchain().info().best_hash, block2);
4807	}
4808
4809	#[test]
4810	fn force_delayed_canonicalize_waiting_for_blocks_to_be_finalized() {
4811		let pruning_modes =
4812			[BlocksPruning::Some(10), BlocksPruning::KeepAll, BlocksPruning::KeepFinalized];
4813
4814		for pruning_mode in pruning_modes {
4815			eprintln!("Running with pruning mode: {:?}", pruning_mode);
4816
4817			let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);
4818
4819			let genesis = insert_block(
4820				&backend,
4821				0,
4822				Default::default(),
4823				None,
4824				Default::default(),
4825				vec![],
4826				None,
4827			)
4828			.unwrap();
4829
4830			let block1 = {
4831				let mut op = backend.begin_operation().unwrap();
4832				backend.begin_state_operation(&mut op, genesis).unwrap();
4833				let mut header = Header {
4834					number: 1,
4835					parent_hash: genesis,
4836					state_root: Default::default(),
4837					digest: Default::default(),
4838					extrinsics_root: Default::default(),
4839				};
4840
4841				let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];
4842
4843				let (root, overlay) = op.old_state.storage_root(
4844					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4845					StateVersion::V1,
4846				);
4847				op.update_db_storage(overlay).unwrap();
4848				header.state_root = root.into();
4849
4850				op.update_storage(storage, Vec::new()).unwrap();
4851
4852				op.set_block_data(
4853					header.clone(),
4854					Some(Vec::new()),
4855					None,
4856					None,
4857					NewBlockState::Normal,
4858					true,
4859				)
4860				.unwrap();
4861
4862				backend.commit_operation(op).unwrap();
4863
4864				header.hash()
4865			};
4866
4867			if matches!(pruning_mode, BlocksPruning::Some(_)) {
4868				assert_eq!(
4869					LastCanonicalized::Block(0),
4870					backend.storage.state_db.last_canonicalized()
4871				);
4872			}
4873
4874			// This should not trigger any forced canonicalization as we didn't have imported any
4875			// best block by now.
4876			let block2 = {
4877				let mut op = backend.begin_operation().unwrap();
4878				backend.begin_state_operation(&mut op, block1).unwrap();
4879				let mut header = Header {
4880					number: 2,
4881					parent_hash: block1,
4882					state_root: Default::default(),
4883					digest: Default::default(),
4884					extrinsics_root: Default::default(),
4885				};
4886
4887				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 2]))];
4888
4889				let (root, overlay) = op.old_state.storage_root(
4890					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4891					StateVersion::V1,
4892				);
4893				op.update_db_storage(overlay).unwrap();
4894				header.state_root = root.into();
4895
4896				op.update_storage(storage, Vec::new()).unwrap();
4897
4898				op.set_block_data(
4899					header.clone(),
4900					Some(Vec::new()),
4901					None,
4902					None,
4903					NewBlockState::Normal,
4904					true,
4905				)
4906				.unwrap();
4907
4908				backend.commit_operation(op).unwrap();
4909
4910				header.hash()
4911			};
4912
4913			if matches!(pruning_mode, BlocksPruning::Some(_)) {
4914				assert_eq!(
4915					LastCanonicalized::Block(0),
4916					backend.storage.state_db.last_canonicalized()
4917				);
4918			}
4919
4920			// This should also not trigger it yet, because we import a best block, but the best
4921			// block from the POV of the db is still at `0`.
4922			let block3 = {
4923				let mut op = backend.begin_operation().unwrap();
4924				backend.begin_state_operation(&mut op, block2).unwrap();
4925				let mut header = Header {
4926					number: 3,
4927					parent_hash: block2,
4928					state_root: Default::default(),
4929					digest: Default::default(),
4930					extrinsics_root: Default::default(),
4931				};
4932
4933				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 3]))];
4934
4935				let (root, overlay) = op.old_state.storage_root(
4936					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4937					StateVersion::V1,
4938				);
4939				op.update_db_storage(overlay).unwrap();
4940				header.state_root = root.into();
4941
4942				op.update_storage(storage, Vec::new()).unwrap();
4943
4944				op.set_block_data(
4945					header.clone(),
4946					Some(Vec::new()),
4947					None,
4948					None,
4949					NewBlockState::Best,
4950					true,
4951				)
4952				.unwrap();
4953
4954				backend.commit_operation(op).unwrap();
4955
4956				header.hash()
4957			};
4958
4959			// Now it should kick in.
4960			let block4 = {
4961				let mut op = backend.begin_operation().unwrap();
4962				backend.begin_state_operation(&mut op, block3).unwrap();
4963				let mut header = Header {
4964					number: 4,
4965					parent_hash: block3,
4966					state_root: Default::default(),
4967					digest: Default::default(),
4968					extrinsics_root: Default::default(),
4969				};
4970
4971				let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 4]))];
4972
4973				let (root, overlay) = op.old_state.storage_root(
4974					storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
4975					StateVersion::V1,
4976				);
4977				op.update_db_storage(overlay).unwrap();
4978				header.state_root = root.into();
4979
4980				op.update_storage(storage, Vec::new()).unwrap();
4981
4982				op.set_block_data(
4983					header.clone(),
4984					Some(Vec::new()),
4985					None,
4986					None,
4987					NewBlockState::Best,
4988					true,
4989				)
4990				.unwrap();
4991
4992				backend.commit_operation(op).unwrap();
4993
4994				header.hash()
4995			};
4996
4997			if matches!(pruning_mode, BlocksPruning::Some(_)) {
4998				assert_eq!(
4999					LastCanonicalized::Block(2),
5000					backend.storage.state_db.last_canonicalized()
5001				);
5002			}
5003
5004			assert_eq!(block1, backend.blockchain().hash(1).unwrap().unwrap());
5005			assert_eq!(block2, backend.blockchain().hash(2).unwrap().unwrap());
5006			assert_eq!(block3, backend.blockchain().hash(3).unwrap().unwrap());
5007			assert_eq!(block4, backend.blockchain().hash(4).unwrap().unwrap());
5008		}
5009	}
5010
	#[test]
	fn test_pinned_blocks_on_finalize() {
		// Exercises the pinned-blocks cache: with a pruning window of 1, bodies
		// and justifications of pruned blocks must stay accessible for as long
		// as they are pinned and disappear once the last pin is dropped.
		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
		let mut blocks = Vec::new();
		let mut prev_hash = Default::default();

		// Builds a distinct justification per block number so the assertions
		// below can tell them apart.
		let build_justification = |i: u64| ([0, 0, 0, 0], vec![i.try_into().unwrap()]);
		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4
		for i in 0..5 {
			let hash = insert_block(
				&backend,
				i,
				prev_hash,
				None,
				Default::default(),
				vec![UncheckedXt::new_transaction(i.into(), ())],
				None,
			)
			.unwrap();
			blocks.push(hash);
			// Avoid block pruning.
			backend.pin_block(blocks[i as usize]).unwrap();

			prev_hash = hash;
		}

		let bc = backend.blockchain();

		// Check that we can properly access values when there is reference count
		// but no value.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);

		// Block 1 gets pinned three times in total (once in the loop above plus
		// the two pins below).
		backend.pin_block(blocks[1]).unwrap();
		backend.pin_block(blocks[1]).unwrap();

		// Finalize all blocks. This will trigger pruning.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
		for i in 1..5 {
			op.mark_finalized(blocks[i], Some(build_justification(i.try_into().unwrap())))
				.unwrap();
		}
		backend.commit_operation(op).unwrap();

		// Block 0, 1, 2, 3 are pinned, so all values should be cached.
		// Block 4 is inside the pruning window, its value is in db.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(0.into(), ())]),
			bc.body(blocks[0]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
			bc.body(blocks[1]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(1))),
			bc.justifications(blocks[1]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
			bc.body(blocks[2]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(2))),
			bc.justifications(blocks[2]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
			bc.body(blocks[3]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(3))),
			bc.justifications(blocks[3]).unwrap()
		);

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);

		// Unpin all blocks once. Values should be removed from cache, except
		// for block 1 which still has two outstanding pins.
		for block in &blocks {
			backend.unpin_block(*block);
		}

		assert!(bc.body(blocks[0]).unwrap().is_none());
		// Block 1 still has two outstanding pins, we expect it to be still cached
		assert!(bc.body(blocks[1]).unwrap().is_some());
		assert!(bc.justifications(blocks[1]).unwrap().is_some());
		// Headers should also be available while pinned
		assert!(bc.header(blocks[1]).ok().flatten().is_some());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.justifications(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());
		assert!(bc.justifications(blocks[3]).unwrap().is_none());

		// Dropping the two remaining pins removes block 1 from the cache:
		// still there after the first unpin, gone after the second.
		backend.unpin_block(blocks[1]);
		assert!(bc.body(blocks[1]).unwrap().is_some());
		assert!(bc.justifications(blocks[1]).unwrap().is_some());
		backend.unpin_block(blocks[1]);
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.justifications(blocks[1]).unwrap().is_none());

		// Block 4 is inside the pruning window and still kept
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);

		// Block tree:
		//   0 -> 1 -> 2 -> 3 -> 4 -> 5
		let hash = insert_block(
			&backend,
			5,
			prev_hash,
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(5.into(), ())],
			None,
		)
		.unwrap();
		blocks.push(hash);

		// Pin block 4 before it falls out of the pruning window.
		backend.pin_block(blocks[4]).unwrap();
		// Mark block 5 as finalized.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[5]).unwrap();
		op.mark_finalized(blocks[5], Some(build_justification(5))).unwrap();
		backend.commit_operation(op).unwrap();

		// Earlier blocks are unpinned and pruned by now.
		assert!(bc.body(blocks[0]).unwrap().is_none());
		assert!(bc.body(blocks[1]).unwrap().is_none());
		assert!(bc.body(blocks[2]).unwrap().is_none());
		assert!(bc.body(blocks[3]).unwrap().is_none());

		// Block 4 is pinned, block 5 is inside the pruning window.
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
			bc.body(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(Justifications::from(build_justification(4))),
			bc.justifications(blocks[4]).unwrap()
		);
		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(5.into(), ())]),
			bc.body(blocks[5]).unwrap()
		);
		assert!(bc.header(blocks[5]).ok().flatten().is_some());

		backend.unpin_block(blocks[4]);
		assert!(bc.body(blocks[4]).unwrap().is_none());
		assert!(bc.justifications(blocks[4]).unwrap().is_none());

		// Append a justification to block 5.
		backend.append_justification(blocks[5], ([0, 0, 0, 1], vec![42])).unwrap();

		let hash = insert_block(
			&backend,
			6,
			blocks[5],
			None,
			Default::default(),
			vec![UncheckedXt::new_transaction(6.into(), ())],
			None,
		)
		.unwrap();
		blocks.push(hash);

		// Pin block 5 so it gets loaded into the cache on prune
		backend.pin_block(blocks[5]).unwrap();

		// Finalize block 6 so block 5 gets pruned. Since it is pinned both justifications should be
		// in memory.
		let mut op = backend.begin_operation().unwrap();
		backend.begin_state_operation(&mut op, blocks[6]).unwrap();
		op.mark_finalized(blocks[6], None).unwrap();
		backend.commit_operation(op).unwrap();

		assert_eq!(
			Some(vec![UncheckedXt::new_transaction(5.into(), ())]),
			bc.body(blocks[5]).unwrap()
		);
		assert!(bc.header(blocks[5]).ok().flatten().is_some());
		let mut expected = Justifications::from(build_justification(5));
		expected.append(([0, 0, 0, 1], vec![42]));
		assert_eq!(Some(expected), bc.justifications(blocks[5]).unwrap());
	}
5215
5216	#[test]
5217	fn test_pinned_blocks_on_finalize_with_fork() {
5218		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
5219		let mut blocks = Vec::new();
5220		let mut prev_hash = Default::default();
5221
5222		// Block tree:
5223		//   0 -> 1 -> 2 -> 3 -> 4
5224		for i in 0..5 {
5225			let hash = insert_block(
5226				&backend,
5227				i,
5228				prev_hash,
5229				None,
5230				Default::default(),
5231				vec![UncheckedXt::new_transaction(i.into(), ())],
5232				None,
5233			)
5234			.unwrap();
5235			blocks.push(hash);
5236
5237			// Avoid block pruning.
5238			backend.pin_block(blocks[i as usize]).unwrap();
5239
5240			prev_hash = hash;
5241		}
5242
5243		// Insert a fork at the second block.
5244		// Block tree:
5245		//   0 -> 1 -> 2 -> 3 -> 4
5246		//        \ -> 2 -> 3
5247		let fork_hash_root = insert_block(
5248			&backend,
5249			2,
5250			blocks[1],
5251			None,
5252			H256::random(),
5253			vec![UncheckedXt::new_transaction(2.into(), ())],
5254			None,
5255		)
5256		.unwrap();
5257		let fork_hash_3 = insert_block(
5258			&backend,
5259			3,
5260			fork_hash_root,
5261			None,
5262			H256::random(),
5263			vec![
5264				UncheckedXt::new_transaction(3.into(), ()),
5265				UncheckedXt::new_transaction(11.into(), ()),
5266			],
5267			None,
5268		)
5269		.unwrap();
5270
5271		// Do not prune the fork hash.
5272		backend.pin_block(fork_hash_3).unwrap();
5273
5274		let mut op = backend.begin_operation().unwrap();
5275		backend.begin_state_operation(&mut op, blocks[4]).unwrap();
5276		op.mark_head(blocks[4]).unwrap();
5277		backend.commit_operation(op).unwrap();
5278
5279		for i in 1..5 {
5280			let mut op = backend.begin_operation().unwrap();
5281			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
5282			op.mark_finalized(blocks[i], None).unwrap();
5283			backend.commit_operation(op).unwrap();
5284		}
5285
5286		let bc = backend.blockchain();
5287		assert_eq!(
5288			Some(vec![UncheckedXt::new_transaction(0.into(), ())]),
5289			bc.body(blocks[0]).unwrap()
5290		);
5291		assert_eq!(
5292			Some(vec![UncheckedXt::new_transaction(1.into(), ())]),
5293			bc.body(blocks[1]).unwrap()
5294		);
5295		assert_eq!(
5296			Some(vec![UncheckedXt::new_transaction(2.into(), ())]),
5297			bc.body(blocks[2]).unwrap()
5298		);
5299		assert_eq!(
5300			Some(vec![UncheckedXt::new_transaction(3.into(), ())]),
5301			bc.body(blocks[3]).unwrap()
5302		);
5303		assert_eq!(
5304			Some(vec![UncheckedXt::new_transaction(4.into(), ())]),
5305			bc.body(blocks[4]).unwrap()
5306		);
5307		// Check the fork hashes.
5308		assert_eq!(None, bc.body(fork_hash_root).unwrap());
5309		assert_eq!(
5310			Some(vec![
5311				UncheckedXt::new_transaction(3.into(), ()),
5312				UncheckedXt::new_transaction(11.into(), ())
5313			]),
5314			bc.body(fork_hash_3).unwrap()
5315		);
5316
5317		// Unpin all blocks, except the forked one.
5318		for block in &blocks {
5319			backend.unpin_block(*block);
5320		}
5321		assert!(bc.body(blocks[0]).unwrap().is_none());
5322		assert!(bc.body(blocks[1]).unwrap().is_none());
5323		assert!(bc.body(blocks[2]).unwrap().is_none());
5324		assert!(bc.body(blocks[3]).unwrap().is_none());
5325
5326		assert!(bc.body(fork_hash_3).unwrap().is_some());
5327		backend.unpin_block(fork_hash_3);
5328		assert!(bc.body(fork_hash_3).unwrap().is_none());
5329	}
5330
5331	#[test]
5332	fn prune_blocks_with_empty_predicates_prunes_all() {
5333		// Test backward compatibility: empty predicates means all blocks are pruned
5334		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
5335			BlocksPruning::Some(2),
5336			0,
5337			vec![], // Empty predicates
5338		);
5339
5340		let mut blocks = Vec::new();
5341		let mut prev_hash = Default::default();
5342
5343		// Create 5 blocks
5344		for i in 0..5 {
5345			let hash = insert_block(
5346				&backend,
5347				i,
5348				prev_hash,
5349				None,
5350				Default::default(),
5351				vec![UncheckedXt::new_transaction(i.into(), ())],
5352				None,
5353			)
5354			.unwrap();
5355			blocks.push(hash);
5356			prev_hash = hash;
5357		}
5358
5359		// Justification - but no predicate to preserve it
5360		let justification = (CONS0_ENGINE_ID, vec![1, 2, 3]);
5361
5362		// Finalize blocks, adding justification to block 1
5363		{
5364			let mut op = backend.begin_operation().unwrap();
5365			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
5366			op.mark_finalized(blocks[1], Some(justification.clone())).unwrap();
5367			op.mark_finalized(blocks[2], None).unwrap();
5368			op.mark_finalized(blocks[3], None).unwrap();
5369			op.mark_finalized(blocks[4], None).unwrap();
5370			backend.commit_operation(op).unwrap();
5371		}
5372
5373		let bc = backend.blockchain();
5374
5375		// All blocks outside pruning window should be pruned, even with justification
5376		assert_eq!(None, bc.body(blocks[0]).unwrap());
5377		assert_eq!(None, bc.body(blocks[1]).unwrap()); // Has justification but no predicate
5378		assert_eq!(None, bc.body(blocks[2]).unwrap());
5379
5380		// Blocks 3 and 4 are within the pruning window
5381		assert!(bc.body(blocks[3]).unwrap().is_some());
5382		assert!(bc.body(blocks[4]).unwrap().is_some());
5383	}
5384
5385	#[test]
5386	fn prune_blocks_multiple_filters_or_logic() {
5387		// Test that multiple filters use OR logic: if ANY filter matches, block is kept
5388		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
5389			BlocksPruning::Some(2),
5390			0,
5391			vec![
5392				Arc::new(|j: &Justifications| j.get(CONS0_ENGINE_ID).is_some()),
5393				Arc::new(|j: &Justifications| j.get(CONS1_ENGINE_ID).is_some()),
5394			],
5395		);
5396
5397		let mut blocks = Vec::new();
5398		let mut prev_hash = Default::default();
5399
5400		// Create 7 blocks
5401		for i in 0..7 {
5402			let hash = insert_block(
5403				&backend,
5404				i,
5405				prev_hash,
5406				None,
5407				Default::default(),
5408				vec![UncheckedXt::new_transaction(i.into(), ())],
5409				None,
5410			)
5411			.unwrap();
5412			blocks.push(hash);
5413			prev_hash = hash;
5414		}
5415
5416		let cons0_justification = (CONS0_ENGINE_ID, vec![1, 2, 3]);
5417		let cons1_justification = (CONS1_ENGINE_ID, vec![4, 5, 6]);
5418
5419		// Finalize blocks with different justification patterns
5420		{
5421			let mut op = backend.begin_operation().unwrap();
5422			backend.begin_state_operation(&mut op, blocks[6]).unwrap();
5423			// Block 1: CONS0 only - should be preserved
5424			op.mark_finalized(blocks[1], Some(cons0_justification.clone())).unwrap();
5425			// Block 2: CONS1 only - should be preserved
5426			op.mark_finalized(blocks[2], Some(cons1_justification.clone())).unwrap();
5427			// Block 3: No justification - should be pruned
5428			op.mark_finalized(blocks[3], None).unwrap();
5429			// Block 4: Random/unknown engine ID - should be pruned
5430			op.mark_finalized(blocks[4], Some(([9, 9, 9, 9], vec![7, 8, 9]))).unwrap();
5431			op.mark_finalized(blocks[5], None).unwrap();
5432			op.mark_finalized(blocks[6], None).unwrap();
5433			backend.commit_operation(op).unwrap();
5434		}
5435
5436		let bc = backend.blockchain();
5437
5438		// Block 0 should be pruned (outside window, no justification)
5439		assert_eq!(None, bc.body(blocks[0]).unwrap());
5440
5441		// Block 1 should be preserved (has CONS0 justification)
5442		assert!(bc.body(blocks[1]).unwrap().is_some());
5443
5444		// Block 2 should be preserved (has CONS1 justification)
5445		assert!(bc.body(blocks[2]).unwrap().is_some());
5446
5447		// Block 3 should be pruned (no justification)
5448		assert_eq!(None, bc.body(blocks[3]).unwrap());
5449
5450		// Block 4 should be pruned (unknown engine ID)
5451		assert_eq!(None, bc.body(blocks[4]).unwrap());
5452
5453		// Blocks 5 and 6 are within the pruning window
5454		assert!(bc.body(blocks[5]).unwrap().is_some());
5455		assert!(bc.body(blocks[6]).unwrap().is_some());
5456	}
5457
5458	#[test]
5459	fn prune_blocks_filter_only_matches_specific_engine() {
5460		// Test that a filter for one engine ID does NOT preserve blocks with a different engine ID
5461		let backend = Backend::<Block>::new_test_with_tx_storage_and_filters(
5462			BlocksPruning::Some(2),
5463			0,
5464			vec![Arc::new(|j: &Justifications| j.get(CONS0_ENGINE_ID).is_some())],
5465		);
5466
5467		let mut blocks = Vec::new();
5468		let mut prev_hash = Default::default();
5469
5470		// Create 5 blocks
5471		for i in 0..5 {
5472			let hash = insert_block(
5473				&backend,
5474				i,
5475				prev_hash,
5476				None,
5477				Default::default(),
5478				vec![UncheckedXt::new_transaction(i.into(), ())],
5479				None,
5480			)
5481			.unwrap();
5482			blocks.push(hash);
5483			prev_hash = hash;
5484		}
5485
5486		let cons1_justification = (CONS1_ENGINE_ID, vec![4, 5, 6]);
5487
5488		// Finalize blocks, adding CONS1 justification to block 1
5489		{
5490			let mut op = backend.begin_operation().unwrap();
5491			backend.begin_state_operation(&mut op, blocks[4]).unwrap();
5492			// Block 1 gets CONS1 justification - should NOT be preserved by CONS0 filter
5493			op.mark_finalized(blocks[1], Some(cons1_justification.clone())).unwrap();
5494			op.mark_finalized(blocks[2], None).unwrap();
5495			op.mark_finalized(blocks[3], None).unwrap();
5496			op.mark_finalized(blocks[4], None).unwrap();
5497			backend.commit_operation(op).unwrap();
5498		}
5499
5500		let bc = backend.blockchain();
5501
5502		// Block 0 should be pruned
5503		assert_eq!(None, bc.body(blocks[0]).unwrap());
5504
5505		// Block 1 should also be pruned (CONS1 justification, but only CONS0 filter)
5506		assert_eq!(None, bc.body(blocks[1]).unwrap());
5507
5508		// Block 2 should be pruned
5509		assert_eq!(None, bc.body(blocks[2]).unwrap());
5510
5511		// Blocks 3 and 4 are within the pruning window
5512		assert!(bc.body(blocks[3]).unwrap().is_some());
5513		assert!(bc.body(blocks[4]).unwrap().is_some());
5514	}
5515}