// Source: Substrate — sp_trie/cache/mod.rs
// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Trie Cache
//!
//! Provides an implementation of the [`TrieCache`](trie_db::TrieCache) trait.
//! The implementation is split into three types [`SharedTrieCache`], [`LocalTrieCache`] and
//! [`TrieCache`]. The [`SharedTrieCache`] is the instance that should be kept around for the entire
//! lifetime of the node. It will store all cached trie nodes and values on a global level. Then
//! there is the [`LocalTrieCache`] that should be kept around per state instance requested from the
//! backend. As there are very likely multiple accesses to the state per instance, this
//! [`LocalTrieCache`] is used to cache the nodes and the values before they are merged back to the
//! shared instance. Last but not least there is the [`TrieCache`] that is being used per access to
//! the state. It will use the [`SharedTrieCache`] and the [`LocalTrieCache`] to fulfill cache
//! requests. If both of them don't provide the requested data it will be inserted into the
//! [`LocalTrieCache`] and then later into the [`SharedTrieCache`].
//!
//! The [`SharedTrieCache`] is bound to some maximum number of bytes. It is ensured that it never
//! runs above this limit. However as long as data is cached inside a [`LocalTrieCache`] it isn't
//! taken into account when limiting the [`SharedTrieCache`]. This means that for the lifetime of a
//! [`LocalTrieCache`] the actual memory usage could be above the allowed maximum.
37use crate::{Error, NodeCodec};
38use hash_db::Hasher;
39use metrics::{HitStatsSnapshot, TrieHitStatsSnapshot};
40use nohash_hasher::BuildNoHashHasher;
41use parking_lot::{Mutex, MutexGuard, RwLockWriteGuard};
42use schnellru::LruMap;
43use shared_cache::{ValueCacheKey, ValueCacheRef};
44use std::{
45	collections::HashMap,
46	sync::{
47		atomic::{AtomicU64, Ordering},
48		Arc,
49	},
50	time::Duration,
51};
52use trie_db::{node::NodeOwned, CachedValue};
53
54mod metrics;
55mod shared_cache;
56
57pub use shared_cache::SharedTrieCache;
58
59use self::shared_cache::ValueCacheKeyHash;
60
/// The target for trace/debug log messages emitted by this module.
const LOG_TARGET: &str = "trie-cache";

/// The maximum amount of time we'll wait trying to acquire the shared cache lock
/// when the local cache is dropped and synchronized with the shared cache.
///
/// This is just a failsafe; normally this should never trigger.
const SHARED_CACHE_WRITE_LOCK_TIMEOUT: Duration = Duration::from_millis(100);

/// The maximum number of existing keys in the shared cache that a single local cache
/// can promote to the front of the LRU cache in one go.
///
/// If we have a big shared cache and the local cache hits all of those keys we don't
/// want to spend forever bumping all of them.
const SHARED_NODE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792;
/// Same as [`SHARED_NODE_CACHE_MAX_PROMOTED_KEYS`].
const SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792;

/// The maximum portion of the shared cache (in percent) that a single local
/// cache can replace in one go.
///
/// We don't want a single local cache instance to have the ability to replace
/// everything in the shared cache.
const SHARED_NODE_CACHE_MAX_REPLACE_PERCENT: usize = 33;
/// Same as [`SHARED_NODE_CACHE_MAX_REPLACE_PERCENT`].
const SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT: usize = 33;

/// The maximum inline capacity of the local cache, in bytes.
///
/// This is just an upper limit; since the maps are resized in powers of two
/// their actual size will most likely not exactly match this.
const LOCAL_NODE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024;
/// Same as [`LOCAL_NODE_CACHE_MAX_INLINE_SIZE`].
const LOCAL_VALUE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024;

/// The maximum size of the memory allocated on the heap by the local cache, in bytes.
///
/// The size of the node cache should always be bigger than the value cache. The value
/// cache is only holding weak references to the actual values found in the nodes and
/// we account for the size of the node as part of the node cache.
const LOCAL_NODE_CACHE_MAX_HEAP_SIZE: usize = 8 * 1024 * 1024;
/// Same as [`LOCAL_NODE_CACHE_MAX_HEAP_SIZE`].
const LOCAL_VALUE_CACHE_MAX_HEAP_SIZE: usize = 2 * 1024 * 1024;
103
/// The size of the shared cache.
#[derive(Debug, Clone, Copy)]
pub struct CacheSize(usize);

impl CacheSize {
	/// Creates a cache size of exactly `bytes` bytes.
	pub const fn new(bytes: usize) -> Self {
		Self(bytes)
	}

	/// Creates an effectively unlimited cache size.
	pub const fn unlimited() -> Self {
		Self(usize::MAX)
	}
}
119
/// A limiter for the local node cache. This makes sure the local cache doesn't grow too big.
pub struct LocalNodeCacheLimiter {
	/// The current size (in bytes) of data allocated by this cache on the heap.
	///
	/// This doesn't include the size of the map itself.
	current_heap_size: usize,
	/// The configuration whose limits this limiter enforces.
	config: LocalNodeCacheConfig,
}

impl LocalNodeCacheLimiter {
	/// Creates a new limiter with the given configuration.
	pub fn new(config: LocalNodeCacheConfig) -> Self {
		Self { config, current_heap_size: 0 }
	}
}
134
135impl<H> schnellru::Limiter<H, NodeCached<H>> for LocalNodeCacheLimiter
136where
137	H: AsRef<[u8]> + std::fmt::Debug,
138{
139	type KeyToInsert<'a> = H;
140	type LinkType = u32;
141
142	#[inline]
143	fn is_over_the_limit(&self, length: usize) -> bool {
144		// Only enforce the limit if there's more than one element to make sure
145		// we can always add a new element to the cache.
146		if length <= 1 {
147			return false;
148		}
149
150		self.current_heap_size > self.config.local_node_cache_max_heap_size
151	}
152
153	#[inline]
154	fn on_insert<'a>(
155		&mut self,
156		_length: usize,
157		key: H,
158		cached_node: NodeCached<H>,
159	) -> Option<(H, NodeCached<H>)> {
160		self.current_heap_size += cached_node.heap_size();
161		Some((key, cached_node))
162	}
163
164	#[inline]
165	fn on_replace(
166		&mut self,
167		_length: usize,
168		_old_key: &mut H,
169		_new_key: H,
170		old_node: &mut NodeCached<H>,
171		new_node: &mut NodeCached<H>,
172	) -> bool {
173		debug_assert_eq!(_old_key.as_ref().len(), _new_key.as_ref().len());
174		self.current_heap_size =
175			self.current_heap_size + new_node.heap_size() - old_node.heap_size();
176		true
177	}
178
179	#[inline]
180	fn on_removed(&mut self, _key: &mut H, cached_node: &mut NodeCached<H>) {
181		self.current_heap_size -= cached_node.heap_size();
182	}
183
184	#[inline]
185	fn on_cleared(&mut self) {
186		self.current_heap_size = 0;
187	}
188
189	#[inline]
190	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
191		new_memory_usage <= self.config.local_node_cache_max_inline_size
192	}
193}
194
/// A limiter for the local value cache. This makes sure the local cache doesn't grow too big.
pub struct LocalValueCacheLimiter {
	/// The current size (in bytes) of data allocated by this cache on the heap.
	///
	/// This doesn't include the size of the map itself.
	current_heap_size: usize,

	/// The configuration whose limits this limiter enforces.
	config: LocalValueCacheConfig,
}

impl LocalValueCacheLimiter {
	/// Creates a new limiter with the given configuration.
	pub fn new(config: LocalValueCacheConfig) -> Self {
		Self { config, current_heap_size: 0 }
	}
}
211
212impl<H> schnellru::Limiter<ValueCacheKey<H>, CachedValue<H>> for LocalValueCacheLimiter
213where
214	H: AsRef<[u8]>,
215{
216	type KeyToInsert<'a> = ValueCacheRef<'a, H>;
217	type LinkType = u32;
218
219	#[inline]
220	fn is_over_the_limit(&self, length: usize) -> bool {
221		// Only enforce the limit if there's more than one element to make sure
222		// we can always add a new element to the cache.
223		if length <= 1 {
224			return false;
225		}
226
227		self.current_heap_size > self.config.local_value_cache_max_heap_size
228	}
229
230	#[inline]
231	fn on_insert(
232		&mut self,
233		_length: usize,
234		key: Self::KeyToInsert<'_>,
235		value: CachedValue<H>,
236	) -> Option<(ValueCacheKey<H>, CachedValue<H>)> {
237		self.current_heap_size += key.storage_key.len();
238		Some((key.into(), value))
239	}
240
241	#[inline]
242	fn on_replace(
243		&mut self,
244		_length: usize,
245		_old_key: &mut ValueCacheKey<H>,
246		_new_key: ValueCacheRef<H>,
247		_old_value: &mut CachedValue<H>,
248		_new_value: &mut CachedValue<H>,
249	) -> bool {
250		debug_assert_eq!(_old_key.storage_key.len(), _new_key.storage_key.len());
251		true
252	}
253
254	#[inline]
255	fn on_removed(&mut self, key: &mut ValueCacheKey<H>, _: &mut CachedValue<H>) {
256		self.current_heap_size -= key.storage_key.len();
257	}
258
259	#[inline]
260	fn on_cleared(&mut self) {
261		self.current_heap_size = 0;
262	}
263
264	#[inline]
265	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
266		new_memory_usage <= self.config.local_value_cache_max_inline_size
267	}
268}
269
/// A struct to gather hit/miss stats to aid in debugging the performance of the cache.
#[derive(Default)]
struct HitStats {
	/// Number of lookups answered by the shared cache.
	shared_hits: AtomicU64,
	/// Number of lookups attempted against the shared cache.
	shared_fetch_attempts: AtomicU64,
	/// Number of lookups answered by the local cache.
	local_hits: AtomicU64,
	/// Number of lookups attempted against the local cache.
	local_fetch_attempts: AtomicU64,
}
278
impl HitStats {
	/// Returns a snapshot of the hit/miss stats.
	///
	/// Each counter is read individually with `Relaxed` ordering, so the snapshot
	/// is not guaranteed to represent a single consistent point in time.
	fn snapshot(&self) -> HitStatsSnapshot {
		HitStatsSnapshot {
			shared_hits: self.shared_hits.load(Ordering::Relaxed),
			shared_fetch_attempts: self.shared_fetch_attempts.load(Ordering::Relaxed),
			local_hits: self.local_hits.load(Ordering::Relaxed),
			local_fetch_attempts: self.local_fetch_attempts.load(Ordering::Relaxed),
		}
	}
}
290
291impl std::fmt::Display for HitStats {
292	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
293		let snapshot = self.snapshot();
294		write!(f, "{}", snapshot)
295	}
296}
297
/// A struct to gather hit/miss stats for the node cache and the value cache.
#[derive(Default)]
struct TrieHitStats {
	/// Hit/miss stats of the trie node cache.
	node_cache: HitStats,
	/// Hit/miss stats of the trie value cache.
	value_cache: HitStats,
}
304
305impl TrieHitStats {
306	/// Returns a snapshot of the hit/miss stats.
307	fn snapshot(&self) -> TrieHitStatsSnapshot {
308		TrieHitStatsSnapshot {
309			node_cache: self.node_cache.snapshot(),
310			value_cache: self.value_cache.snapshot(),
311		}
312	}
313
314	/// Adds the stats from snapshot to this one.
315	fn add_snapshot(&self, other: &TrieHitStatsSnapshot) {
316		self.node_cache
317			.local_fetch_attempts
318			.fetch_add(other.node_cache.local_fetch_attempts, Ordering::Relaxed);
319
320		self.node_cache
321			.shared_fetch_attempts
322			.fetch_add(other.node_cache.shared_fetch_attempts, Ordering::Relaxed);
323
324		self.node_cache
325			.local_hits
326			.fetch_add(other.node_cache.local_hits, Ordering::Relaxed);
327
328		self.node_cache
329			.shared_hits
330			.fetch_add(other.node_cache.shared_hits, Ordering::Relaxed);
331
332		self.value_cache
333			.local_fetch_attempts
334			.fetch_add(other.value_cache.local_fetch_attempts, Ordering::Relaxed);
335
336		self.value_cache
337			.shared_fetch_attempts
338			.fetch_add(other.value_cache.shared_fetch_attempts, Ordering::Relaxed);
339
340		self.value_cache
341			.local_hits
342			.fetch_add(other.value_cache.local_hits, Ordering::Relaxed);
343
344		self.value_cache
345			.shared_hits
346			.fetch_add(other.value_cache.shared_hits, Ordering::Relaxed);
347	}
348}
349
/// An internal struct to store the cached trie nodes.
pub(crate) struct NodeCached<H> {
	/// The cached node.
	pub node: NodeOwned<H>,
	/// Whether this node was fetched from the shared cache or not.
	pub is_from_shared_cache: bool,
}

impl<H> NodeCached<H> {
	/// Returns the number of bytes allocated on the heap by this node.
	///
	/// NOTE(review): assumes `NodeOwned::size_in_bytes` always reports at least
	/// `size_of::<NodeOwned<H>>()`; otherwise this subtraction underflows — confirm
	/// against the `trie_db` contract.
	fn heap_size(&self) -> usize {
		self.node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>()
	}
}
364
/// The LRU map used by a local cache to store trie nodes, keyed by node hash.
type NodeCacheMap<H> = LruMap<H, NodeCached<H>, LocalNodeCacheLimiter, schnellru::RandomState>;

/// The LRU map used by a local cache to store trie values.
///
/// Uses a no-op hasher; the [`ValueCacheKey`] is expected to carry a precomputed
/// hash (see `ValueCacheKey::hash_data` usage below).
type ValueCacheMap<H> = LruMap<
	ValueCacheKey<H>,
	CachedValue<H>,
	LocalValueCacheLimiter,
	BuildNoHashHasher<ValueCacheKey<H>>,
>;

/// Set of value-cache key hashes that were accessed in the shared cache, used to
/// bump their LRU positions when the local cache is merged back.
type ValueAccessSet =
	LruMap<ValueCacheKeyHash, (), schnellru::ByLength, BuildNoHashHasher<ValueCacheKeyHash>>;
376
/// Configuration limits for a local value cache.
#[derive(Clone, Copy)]
pub struct LocalValueCacheConfig {
	/// The maximum size of the value cache on the heap.
	local_value_cache_max_heap_size: usize,
	/// The maximum size of the value cache in the inline storage.
	local_value_cache_max_inline_size: usize,
	/// The maximum number of keys that can be promoted to the front of the LRU cache.
	shared_value_cache_max_promoted_keys: u32,
	/// The maximum percentage of the shared cache that can be replaced, before giving up.
	shared_value_cache_max_replace_percent: usize,
}
388
/// Configuration limits for a local node cache.
#[derive(Clone, Copy)]
pub struct LocalNodeCacheConfig {
	/// The maximum size of the node cache on the heap.
	local_node_cache_max_heap_size: usize,
	/// The maximum size of the node cache in the inline storage.
	local_node_cache_max_inline_size: usize,
	/// The maximum number of keys that can be promoted to the front of the LRU cache, before
	/// giving up.
	shared_node_cache_max_promoted_keys: u32,
	/// The maximum percentage of the shared cache that can be replaced, before giving up.
	shared_node_cache_max_replace_percent: usize,
}
401
402impl LocalNodeCacheConfig {
403	/// Creates a configuration that can be called from a trusted path and allows the local_cache
404	/// to grow to fit the needs, also everything is promoted to the shared cache.
405	///
406	/// This configuration is safe only for trusted paths because it allows the local cache
407	/// to grow up to the shared cache limits and it promotes all items into the shared cache.
408	/// This could lead to excessive memory usage if used in untrusted or uncontrolled environments.
409	/// It is intended for scenarios like block authoring or importing, where the operations
410	/// are bounded already and there are no risks of unbounded memory usage.
411	fn trusted(
412		local_node_cache_max_heap_size: usize,
413		local_node_cache_max_inline_size: usize,
414	) -> Self {
415		LocalNodeCacheConfig {
416			local_node_cache_max_heap_size: std::cmp::max(
417				local_node_cache_max_heap_size,
418				LOCAL_NODE_CACHE_MAX_HEAP_SIZE,
419			),
420			local_node_cache_max_inline_size: std::cmp::max(
421				local_node_cache_max_inline_size,
422				LOCAL_NODE_CACHE_MAX_INLINE_SIZE,
423			),
424			shared_node_cache_max_promoted_keys: u32::MAX,
425			shared_node_cache_max_replace_percent: 100,
426		}
427	}
428
429	/// Creates a configuration that can be called from an untrusted path.
430	///
431	/// It limits the local size of the cache and the amount of keys that can be promoted to the
432	/// shared cache.
433	fn untrusted() -> Self {
434		LocalNodeCacheConfig {
435			local_node_cache_max_inline_size: LOCAL_NODE_CACHE_MAX_INLINE_SIZE,
436			local_node_cache_max_heap_size: LOCAL_NODE_CACHE_MAX_HEAP_SIZE,
437			shared_node_cache_max_promoted_keys: SHARED_NODE_CACHE_MAX_PROMOTED_KEYS,
438			shared_node_cache_max_replace_percent: SHARED_NODE_CACHE_MAX_REPLACE_PERCENT,
439		}
440	}
441}
442
443impl LocalValueCacheConfig {
444	/// Creates a configuration that can be called from a trusted path and allows the local_cache
445	/// to grow to fit the needs, also everything is promoted to the shared cache.
446	///
447	/// This configuration is safe only for trusted paths because it allows the local cache
448	/// to grow up to the shared cache limits and it promotes all items into the shared cache.
449	/// This could lead to excessive memory usage if used in untrusted or uncontrolled environments.
450	/// It is intended for scenarios like block authoring or importing, where the operations
451	/// are bounded already and there are no risks of unbounded memory usage.
452	fn trusted(
453		local_value_cache_max_heap_size: usize,
454		local_value_cache_max_inline_size: usize,
455	) -> Self {
456		LocalValueCacheConfig {
457			shared_value_cache_max_promoted_keys: u32::MAX,
458			shared_value_cache_max_replace_percent: 100,
459			local_value_cache_max_inline_size: std::cmp::max(
460				local_value_cache_max_inline_size,
461				LOCAL_VALUE_CACHE_MAX_INLINE_SIZE,
462			),
463			local_value_cache_max_heap_size: std::cmp::max(
464				local_value_cache_max_heap_size,
465				LOCAL_VALUE_CACHE_MAX_HEAP_SIZE,
466			),
467		}
468	}
469
470	/// Creates a configuration that can be called from an untrusted path.
471	///
472	/// It limits the local size of the cache and the amount of keys that can be promoted to the
473	/// shared cache.
474	fn untrusted() -> Self {
475		LocalValueCacheConfig {
476			local_value_cache_max_inline_size: LOCAL_VALUE_CACHE_MAX_INLINE_SIZE,
477			local_value_cache_max_heap_size: LOCAL_VALUE_CACHE_MAX_HEAP_SIZE,
478			shared_value_cache_max_promoted_keys: SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS,
479			shared_value_cache_max_replace_percent: SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT,
480		}
481	}
482}
483
/// The local trie cache.
///
/// This cache should be used per state instance created by the backend. One state instance is
/// referring to the state of one block. It will cache all the accesses that are done to the state
/// which could not be fulfilled by the [`SharedTrieCache`]. These locally cached items are merged
/// back to the shared trie cache when this instance is dropped.
///
/// When using [`Self::as_trie_db_cache`] or [`Self::as_trie_db_mut_cache`], it will lock Mutexes.
/// So, it is important that these methods are not called multiple times, because they otherwise
/// deadlock.
pub struct LocalTrieCache<H: Hasher> {
	/// The shared trie cache that created this instance.
	shared: SharedTrieCache<H>,

	/// The local cache for the trie nodes.
	node_cache: Mutex<NodeCacheMap<H::Out>>,

	/// The local cache for the values.
	value_cache: Mutex<ValueCacheMap<H::Out>>,

	/// Keeps track of all values accessed in the shared cache.
	///
	/// This will be used to ensure that these nodes are brought to the front of the lru when this
	/// local instance is merged back to the shared cache. This can actually lead to collision when
	/// two [`ValueCacheKey`]s with different storage roots and keys map to the same hash. However,
	/// as we only use this set to update the lru position it is fine, even if we bring the wrong
	/// value to the top. The important part is that we always get the correct value from the value
	/// cache for a given key.
	shared_value_cache_access: Mutex<ValueAccessSet>,
	/// The configuration for the value cache.
	value_cache_config: LocalValueCacheConfig,
	/// The configuration for the node cache.
	node_cache_config: LocalNodeCacheConfig,
	/// The stats for the cache.
	stats: TrieHitStats,
	/// Specifies if we are in a trusted path like block authoring and importing or not.
	trusted: bool,
}
522
523impl<H: Hasher> LocalTrieCache<H> {
524	/// Return self as a [`TrieDB`](trie_db::TrieDB) compatible cache.
525	///
526	/// The given `storage_root` needs to be the storage root of the trie this cache is used for.
527	pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H> {
528		let value_cache = ValueCache::ForStorageRoot {
529			storage_root,
530			local_value_cache: self.value_cache.lock(),
531			shared_value_cache_access: self.shared_value_cache_access.lock(),
532			buffered_value: None,
533		};
534
535		TrieCache {
536			shared_cache: self.shared.clone(),
537			local_cache: self.node_cache.lock(),
538			value_cache,
539			stats: &self.stats,
540		}
541	}
542
543	/// Return self as [`TrieDBMut`](trie_db::TrieDBMut) compatible cache.
544	///
545	/// After finishing all operations with [`TrieDBMut`](trie_db::TrieDBMut) and having obtained
546	/// the new storage root, [`TrieCache::merge_into`] should be called to update this local
547	/// cache instance. If the function is not called, cached data is just thrown away and not
548	/// propagated to the shared cache. So, accessing these new items will be slower, but nothing
549	/// would break because of this.
550	pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H> {
551		TrieCache {
552			shared_cache: self.shared.clone(),
553			local_cache: self.node_cache.lock(),
554			value_cache: ValueCache::Fresh(Default::default()),
555			stats: &self.stats,
556		}
557	}
558}
559
impl<H: Hasher> Drop for LocalTrieCache<H> {
	/// On drop, merge everything cached locally back into the shared cache.
	fn drop(&mut self) {
		tracing::debug!(
			target: LOG_TARGET,
			"Local node trie cache dropped: {}",
			self.stats.node_cache
		);

		tracing::debug!(
			target: LOG_TARGET,
			"Local value trie cache dropped: {}",
			self.stats.value_cache
		);

		// Merging back is best-effort: if the shared write lock can't be acquired
		// in time, the locally cached data is simply thrown away.
		let mut shared_inner = match self.shared.write_lock_inner() {
			Some(inner) => inner,
			None => {
				tracing::warn!(
					target: LOG_TARGET,
					"Timeout while trying to acquire a write lock for the shared trie cache"
				);
				return;
			},
		};
		// Fold this instance's hit/miss stats into the shared totals and metrics.
		let stats_snapshot = self.stats.snapshot();
		shared_inner.stats_add_snapshot(&stats_snapshot);
		let metrics = shared_inner.metrics().cloned();
		// NOTE(review): `map` is used here (and below) purely for its side effect;
		// `if let Some(metrics) = &metrics` would be the clearer idiom.
		metrics.as_ref().map(|metrics| metrics.observe_hits_stats(&stats_snapshot));
		{
			// Timer guard: records the shared node cache update duration when dropped.
			let _node_update_duration =
				metrics.as_ref().map(|metrics| metrics.start_shared_node_update_timer());
			let node_cache = self.node_cache.get_mut();

			metrics
				.as_ref()
				.map(|metrics| metrics.observe_local_node_cache_length(node_cache.len()));

			// Drain all locally cached nodes into the shared node cache.
			shared_inner.node_cache_mut().update(
				node_cache.drain(),
				&self.node_cache_config,
				&metrics,
			);
		}

		// Since the trie cache is not called from a time sensitive context like block authoring or
		// block import give the option to a more important task to acquire the lock and do its
		// job.
		if !self.trusted {
			RwLockWriteGuard::bump(&mut shared_inner);
		}

		{
			// Timer guard: records the shared value cache update duration when dropped.
			let _node_update_duration =
				metrics.as_ref().map(|metrics| metrics.start_shared_value_update_timer());
			let value_cache = self.shared_value_cache_access.get_mut();
			metrics
				.as_ref()
				.map(|metrics| metrics.observe_local_value_cache_length(value_cache.len()));

			// Drain the locally cached values and the set of accessed shared keys
			// (the latter only bumps LRU positions) into the shared value cache.
			shared_inner.value_cache_mut().update(
				self.value_cache.get_mut().drain(),
				value_cache.drain().map(|(key, ())| key),
				&self.value_cache_config,
				&metrics,
			);
		}
	}
}
628
/// The abstraction of the value cache for the [`TrieCache`].
enum ValueCache<'a, H: Hasher> {
	/// The value cache is fresh, aka not yet associated to any storage root.
	/// This is used for example when a new trie is being built, to cache new values.
	Fresh(HashMap<Arc<[u8]>, CachedValue<H::Out>>),
	/// The value cache is already bound to a specific storage root.
	ForStorageRoot {
		/// Guard over the set tracking which shared-cache keys were accessed.
		shared_value_cache_access: MutexGuard<'a, ValueAccessSet>,
		/// Guard over the owning [`LocalTrieCache`]'s value cache.
		local_value_cache: MutexGuard<'a, ValueCacheMap<H::Out>>,
		/// The storage root this cache is bound to.
		storage_root: H::Out,
		// The shared value cache needs to be temporarily locked when reading from it
		// so we need to clone the value that is returned, but we need to be able to
		// return a reference to the value, so we just buffer it here.
		buffered_value: Option<CachedValue<H::Out>>,
	},
}
645
impl<H: Hasher> ValueCache<'_, H> {
	/// Get the value for the given `key`.
	///
	/// Checks the local cache first and then (when bound to a storage root) the
	/// shared cache, updating the hit/miss counters in `stats` along the way.
	/// Returns `None` when the value is cached nowhere.
	fn get(
		&mut self,
		key: &[u8],
		shared_cache: &SharedTrieCache<H>,
		stats: &HitStats,
	) -> Option<&CachedValue<H::Out>> {
		stats.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		match self {
			Self::Fresh(map) => {
				if let Some(value) = map.get(key) {
					stats.local_hits.fetch_add(1, Ordering::Relaxed);
					Some(value)
				} else {
					None
				}
			},
			Self::ForStorageRoot {
				local_value_cache,
				shared_value_cache_access,
				storage_root,
				buffered_value,
			} => {
				// We first need to look up in the local cache and then the shared cache.
				// It can happen that some value is cached in the shared cache, but the
				// weak reference of the data can not be upgraded anymore. This for example
				// happens when the node is dropped that contains the strong reference to the data.
				//
				// So, the logic of the trie would lookup the data and the node and store both
				// in our local caches.

				let hash = ValueCacheKey::hash_data(key, storage_root);

				// Probe the local cache by precomputed hash; the closure guards
				// against hash collisions by comparing the full key.
				if let Some(value) = local_value_cache
					.peek_by_hash(hash.raw(), |existing_key, _| {
						existing_key.is_eq(storage_root, key)
					}) {
					stats.local_hits.fetch_add(1, Ordering::Relaxed);

					return Some(value);
				}

				stats.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
				if let Some(value) = shared_cache.peek_value_by_hash(hash, storage_root, key) {
					stats.shared_hits.fetch_add(1, Ordering::Relaxed);
					// Record the access so the key's LRU position in the shared
					// cache is bumped when this local cache is merged back.
					shared_value_cache_access.insert(hash, ());
					// The shared cache is only locked temporarily, so the value is
					// cloned into the buffer to be able to hand out a reference.
					*buffered_value = Some(value.clone());
					return buffered_value.as_ref();
				}

				None
			},
		}
	}

	/// Insert some new `value` under the given `key`.
	fn insert(&mut self, key: &[u8], value: CachedValue<H::Out>) {
		match self {
			Self::Fresh(map) => {
				map.insert(key.into(), value);
			},
			Self::ForStorageRoot { local_value_cache, storage_root, .. } => {
				local_value_cache.insert(ValueCacheRef::new(key, *storage_root), value);
			},
		}
	}
}
715
/// The actual [`TrieCache`](trie_db::TrieCache) implementation.
///
/// If this instance was created for using it with a [`TrieDBMut`](trie_db::TrieDBMut), it needs to
/// be merged back into the [`LocalTrieCache`] with [`Self::merge_into`] after all operations are
/// done.
pub struct TrieCache<'a, H: Hasher> {
	/// The shared cache, consulted on local-cache misses.
	shared_cache: SharedTrieCache<H>,
	/// Guard over the node cache of the owning [`LocalTrieCache`].
	local_cache: MutexGuard<'a, NodeCacheMap<H::Out>>,
	/// The value cache, either fresh or bound to a storage root.
	value_cache: ValueCache<'a, H>,
	/// Hit/miss statistics of the owning [`LocalTrieCache`].
	stats: &'a TrieHitStats,
}
727
728impl<'a, H: Hasher> TrieCache<'a, H> {
729	/// Merge this cache into the given [`LocalTrieCache`].
730	///
731	/// This function is only required to be called when this instance was created through
732	/// [`LocalTrieCache::as_trie_db_mut_cache`], otherwise this method is a no-op. The given
733	/// `storage_root` is the new storage root that was obtained after finishing all operations
734	/// using the [`TrieDBMut`](trie_db::TrieDBMut).
735	pub fn merge_into(self, local: &LocalTrieCache<H>, storage_root: H::Out) {
736		let ValueCache::Fresh(cache) = self.value_cache else { return };
737
738		if !cache.is_empty() {
739			let mut value_cache = local.value_cache.lock();
740			let partial_hash = ValueCacheKey::hash_partial_data(&storage_root);
741			cache.into_iter().for_each(|(k, v)| {
742				let hash = ValueCacheKeyHash::from_hasher_and_storage_key(partial_hash.clone(), &k);
743				let k = ValueCacheRef { storage_root, storage_key: &k, hash };
744				value_cache.insert(k, v);
745			});
746		}
747	}
748}
749
750impl<'a, H: Hasher> trie_db::TrieCache<NodeCodec<H>> for TrieCache<'a, H> {
751	fn get_or_insert_node(
752		&mut self,
753		hash: H::Out,
754		fetch_node: &mut dyn FnMut() -> trie_db::Result<NodeOwned<H::Out>, H::Out, Error<H::Out>>,
755	) -> trie_db::Result<&NodeOwned<H::Out>, H::Out, Error<H::Out>> {
756		let mut is_local_cache_hit = true;
757		self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);
758
759		// First try to grab the node from the local cache.
760		let node = self.local_cache.get_or_insert_fallible(hash, || {
761			is_local_cache_hit = false;
762
763			// It was not in the local cache; try the shared cache.
764			self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
765			if let Some(node) = self.shared_cache.peek_node(&hash) {
766				self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed);
767				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache");
768
769				return Ok(NodeCached::<H::Out> { node: node.clone(), is_from_shared_cache: true });
770			}
771
772			// It was not in the shared cache; try fetching it from the database.
773			match fetch_node() {
774				Ok(node) => {
775					tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database");
776					Ok(NodeCached::<H::Out> { node, is_from_shared_cache: false })
777				},
778				Err(error) => {
779					tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database failed");
780					Err(error)
781				},
782			}
783		});
784
785		if is_local_cache_hit {
786			tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache");
787			self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed);
788		}
789
790		Ok(&node?
791			.expect("you can always insert at least one element into the local cache; qed")
792			.node)
793	}
794
795	fn get_node(&mut self, hash: &H::Out) -> Option<&NodeOwned<H::Out>> {
796		let mut is_local_cache_hit = true;
797		self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);
798
799		// First try to grab the node from the local cache.
800		let cached_node = self.local_cache.get_or_insert_fallible(*hash, || {
801			is_local_cache_hit = false;
802
803			// It was not in the local cache; try the shared cache.
804			self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
805			if let Some(node) = self.shared_cache.peek_node(&hash) {
806				self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed);
807				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache");
808
809				Ok(NodeCached::<H::Out> { node: node.clone(), is_from_shared_cache: true })
810			} else {
811				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from cache failed");
812
813				Err(())
814			}
815		});
816
817		if is_local_cache_hit {
818			tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache");
819			self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed);
820		}
821
822		match cached_node {
823			Ok(Some(cached_node)) => Some(&cached_node.node),
824			Ok(None) => {
825				unreachable!(
826					"you can always insert at least one element into the local cache; qed"
827				);
828			},
829			Err(()) => None,
830		}
831	}
832
833	fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue<H::Out>> {
834		let res = self.value_cache.get(key, &self.shared_cache, &self.stats.value_cache);
835
836		tracing::trace!(
837			target: LOG_TARGET,
838			key = ?sp_core::hexdisplay::HexDisplay::from(&key),
839			found = res.is_some(),
840			"Looked up value for key",
841		);
842
843		res
844	}
845
846	fn cache_value_for_key(&mut self, key: &[u8], data: CachedValue<H::Out>) {
847		tracing::trace!(
848			target: LOG_TARGET,
849			key = ?sp_core::hexdisplay::HexDisplay::from(&key),
850			"Caching value for key",
851		);
852
853		self.value_cache.insert(key, data);
854	}
855}
856
// Unit tests covering the interplay of the shared trie cache, the local caches
// (trusted and untrusted), the trie-db cache adapters and the proof recorder.
#[cfg(test)]
mod tests {
	use super::*;
	use rand::{thread_rng, Rng};
	use sp_core::H256;
	use trie_db::{Bytes, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut};

	type MemoryDB = crate::MemoryDB<sp_core::Blake2Hasher>;
	type Layout = crate::LayoutV1<sp_core::Blake2Hasher>;
	type Cache = super::SharedTrieCache<sp_core::Blake2Hasher>;
	type Recorder = crate::recorder::Recorder<sp_core::Blake2Hasher>;

	// Fixed key/value fixture. The 64 byte values are long enough to not be inlined
	// into their trie node (see `trie_db_mut_cache_works` below for the same trick).
	const TEST_DATA: &[(&[u8], &[u8])] =
		&[(b"key1", b"val1"), (b"key2", &[2; 64]), (b"key3", b"val3"), (b"key4", &[4; 64])];
	// Shared cache budget in bytes used by most tests below.
	const CACHE_SIZE_RAW: usize = 1024 * 10;
	const CACHE_SIZE: CacheSize = CacheSize::new(CACHE_SIZE_RAW);

	// Build an in-memory trie containing `TEST_DATA`; returns the backing db and the root.
	fn create_trie() -> (MemoryDB, TrieHash<Layout>) {
		let mut db = MemoryDB::default();
		let mut root = Default::default();

		{
			// Scoped so the mutable trie is dropped before `db`/`root` are returned.
			let mut trie = TrieDBMutBuilder::<Layout>::new(&mut db, &mut root).build();
			for (k, v) in TEST_DATA {
				trie.insert(k, v).expect("Inserts data");
			}
		}

		(db, root)
	}

	// Data cached in a local cache only reaches the shared cache once the local cache
	// is dropped, and values manually inserted into the shared cache are served.
	#[test]
	fn basic_cache_works() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);
		let local_cache = shared_cache.local_cache_untrusted();

		{
			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();
			assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
		}

		// Local cache wasn't dropped yet, so there should be nothing in the shared caches.
		assert!(shared_cache.read_lock_inner().value_cache().lru.is_empty());
		assert!(shared_cache.read_lock_inner().node_cache().lru.is_empty());

		drop(local_cache);

		// Now we should have the cached items in the shared cache.
		assert!(shared_cache.read_lock_inner().node_cache().lru.len() >= 1);
		let cached_data = shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.peek(&ValueCacheKey::new_value(TEST_DATA[0].0, root))
			.unwrap()
			.clone();
		assert_eq!(Bytes::from(TEST_DATA[0].1.to_vec()), cached_data.data().flatten().unwrap());

		let fake_data = Bytes::from(&b"fake_data"[..]);

		let local_cache = shared_cache.local_cache_untrusted();
		// Plant a fake value for `TEST_DATA[1]` directly in the shared cache; a read
		// through the cache must return it instead of the on-disk value.
		shared_cache.write_lock_inner().unwrap().value_cache_mut().lru.insert(
			ValueCacheKey::new_value(TEST_DATA[1].0, root),
			(fake_data.clone(), Default::default()).into(),
		);

		{
			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			// We should now get the "fake_data", because we inserted this manually to the cache.
			assert_eq!(b"fake_data".to_vec(), trie.get(TEST_DATA[1].0).unwrap().unwrap());
		}
	}

	// Values written through a `TrieDBMut` cache end up in the shared value cache
	// after `merge_into` + dropping the local cache.
	#[test]
	fn trie_db_mut_cache_works() {
		let (mut db, root) = create_trie();

		let new_key = b"new_key".to_vec();
		// Use some long value to not have it inlined
		let new_value = vec![23; 64];

		let shared_cache = Cache::new(CACHE_SIZE, None);
		let mut new_root = root;

		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut cache = local_cache.as_trie_db_mut_cache();

			{
				let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
					.with_cache(&mut cache)
					.build();

				trie.insert(&new_key, &new_value).unwrap();
			}

			cache.merge_into(&local_cache, new_root);
		}

		// After the local cache is dropped, all changes should have been merged back to the shared
		// cache.
		let cached_data = shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.peek(&ValueCacheKey::new_value(new_key, new_root))
			.unwrap()
			.clone();
		assert_eq!(Bytes::from(new_value), cached_data.data().flatten().unwrap());
	}

	// Reads that go through both a cache and a recorder still produce a storage
	// proof that replays correctly, regardless of which shared cache is populated.
	#[test]
	fn trie_db_cache_and_recorder_work_together() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);

		// Run multiple times so later iterations are served (partially) from the caches.
		for i in 0..5 {
			// Clear some of the caches.
			if i == 2 {
				shared_cache.reset_node_cache();
			} else if i == 3 {
				shared_cache.reset_value_cache();
			}

			let local_cache = shared_cache.local_cache_untrusted();
			let recorder = Recorder::default();

			{
				let mut cache = local_cache.as_trie_db_cache(root);
				let mut recorder = recorder.as_trie_recorder(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root)
					.with_cache(&mut cache)
					.with_recorder(&mut recorder)
					.build();

				for (key, value) in TEST_DATA {
					assert_eq!(*value, trie.get(&key).unwrap().unwrap());
				}
			}

			// Replay the same reads against the recorded proof only; every key must
			// still be resolvable, i.e. the recorder saw all accesses despite the cache.
			let storage_proof = recorder.drain_storage_proof();
			let memory_db: MemoryDB = storage_proof.into_memory_db();

			{
				let trie = TrieDBBuilder::<Layout>::new(&memory_db, &root).build();

				for (key, value) in TEST_DATA {
					assert_eq!(*value, trie.get(&key).unwrap().unwrap());
				}
			}
		}
	}

	// Writes that go through both a cache and a recorder produce a proof that
	// supports re-applying the same writes to reach the identical new root.
	#[test]
	fn trie_db_mut_cache_and_recorder_work_together() {
		const DATA_TO_ADD: &[(&[u8], &[u8])] = &[(b"key11", &[45; 78]), (b"key33", &[78; 89])];

		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);

		// Run this multiple times so that later runs use the caches filled by earlier
		// runs; runs 2 and 3 additionally clear one of the shared caches.
		for i in 0..5 {
			// Clear some of the caches.
			if i == 2 {
				shared_cache.reset_node_cache();
			} else if i == 3 {
				shared_cache.reset_value_cache();
			}

			let recorder = Recorder::default();
			let local_cache = shared_cache.local_cache_untrusted();
			let mut new_root = root;

			{
				// Work on a copy of the db so every iteration starts from the same state.
				let mut db = db.clone();
				let mut cache = local_cache.as_trie_db_cache(root);
				let mut recorder = recorder.as_trie_recorder(root);
				let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
					.with_cache(&mut cache)
					.with_recorder(&mut recorder)
					.build();

				for (key, value) in DATA_TO_ADD {
					trie.insert(key, value).unwrap();
				}
			}

			// Re-apply the same inserts on top of the proof data only; the resulting
			// root must match the one computed with the full db.
			let storage_proof = recorder.drain_storage_proof();
			let mut memory_db: MemoryDB = storage_proof.into_memory_db();
			let mut proof_root = root;

			{
				let mut trie =
					TrieDBMutBuilder::<Layout>::from_existing(&mut memory_db, &mut proof_root)
						.build();

				for (key, value) in DATA_TO_ADD {
					trie.insert(key, value).unwrap();
				}
			}

			assert_eq!(new_root, proof_root)
		}
	}

	// Accessing values/nodes moves them to the front of the shared LRUs.
	#[test]
	fn cache_lru_works() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);

		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			for (k, _) in TEST_DATA {
				trie.get(k).unwrap().unwrap();
			}
		}

		// Check that all items are there.
		assert!(shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.iter()
			.map(|d| d.0)
			.all(|l| TEST_DATA.iter().any(|d| &*l.storage_key == d.0)));

		// Run this in a loop. The first time we check that with the filled value cache,
		// the expected values are at the top of the LRU.
		// The second run is using an empty value cache to ensure that we access the nodes.
		for _ in 0..2 {
			{
				let local_cache = shared_cache.local_cache_untrusted();

				let mut cache = local_cache.as_trie_db_cache(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

				for (k, _) in TEST_DATA.iter().take(2) {
					trie.get(k).unwrap().unwrap();
				}
			}

			// Ensure that the accessed items are most recently used items of the shared value
			// cache.
			assert!(shared_cache
				.read_lock_inner()
				.value_cache()
				.lru
				.iter()
				.take(2)
				.map(|d| d.0)
				.all(|l| { TEST_DATA.iter().take(2).any(|d| &*l.storage_key == d.0) }));

			// Delete the value cache, so that we access the nodes.
			shared_cache.reset_value_cache();
		}

		let most_recently_used_nodes = shared_cache
			.read_lock_inner()
			.node_cache()
			.lru
			.iter()
			.map(|d| *d.0)
			.collect::<Vec<_>>();

		// Access the *other* keys so different nodes move to the front of the node LRU.
		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			for (k, _) in TEST_DATA.iter().skip(2) {
				trie.get(k).unwrap().unwrap();
			}
		}

		// Ensure that the most recently used nodes changed as well.
		assert_ne!(
			most_recently_used_nodes,
			shared_cache
				.read_lock_inner()
				.node_cache()
				.lru
				.iter()
				.map(|d| *d.0)
				.collect::<Vec<_>>()
		);
	}

	// Writing far more data than the configured budget must not push the shared
	// cache's memory usage above its limit.
	#[test]
	fn cache_respects_bounds() {
		let (mut db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);
		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_cache(root);
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					let value = vec![10u8; 100];
					// Ensure we add enough data that would overflow the cache.
					for i in 0..CACHE_SIZE_RAW / 100 * 2 {
						trie.insert(format!("key{}", i).as_bytes(), &value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
		}

		assert!(shared_cache.used_memory_size() < CACHE_SIZE_RAW);
	}

	// Untrusted local caches only promote a bounded subset of their entries to the
	// shared cache, while trusted local caches promote everything.
	#[test]
	fn test_trusted_works() {
		let (mut db, root) = create_trie();
		// Configure cache size to make sure it is large enough to hold all the data.
		let cache_size = CacheSize::new(1024 * 1024 * 1024);
		let num_test_keys: usize = 40000;
		let shared_cache = Cache::new(cache_size, None);

		// Create random keys, each consisting of 100 random bytes.
		let mut rng = thread_rng();
		let random_keys: Vec<Vec<u8>> =
			(0..num_test_keys).map(|_| (0..100).map(|_| rng.gen()).collect()).collect();

		let value = vec![10u8; 100];

		// Populate the trie using a local untrusted cache and confirm that not everything
		// ends up in the shared trie cache.
		let root = {
			let local_cache = shared_cache.local_cache_untrusted();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_mut_cache();
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					// Insert many more keys than an untrusted local cache will promote to
					// the shared cache.
					for key in random_keys.iter() {
						trie.insert(key.as_ref(), &value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
			new_root
		};
		let shared_value_cache_len = shared_cache.read_lock_inner().value_cache().lru.len();
		assert!(shared_value_cache_len < num_test_keys / 10);

		// Read keys and check shared cache hits; we should have a lot of misses.
		let stats = read_to_check_cache(&shared_cache, &mut db, root, &random_keys, value.clone());
		assert_eq!(stats.value_cache.shared_hits, shared_value_cache_len as u64);

		assert_ne!(stats.value_cache.shared_fetch_attempts, stats.value_cache.shared_hits);
		assert_ne!(stats.node_cache.shared_fetch_attempts, stats.node_cache.shared_hits);

		// Update the keys in the trie and check on subsequent reads all reads hit the shared cache.
		let shared_value_cache_len = shared_cache.read_lock_inner().value_cache().lru.len();
		let new_value = vec![9u8; 100];
		let root = {
			let local_cache = shared_cache.local_cache_trusted();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_mut_cache();
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					// Update all keys; with a trusted local cache every update should
					// reach the shared cache.
					for key in random_keys.iter() {
						trie.insert(key.as_ref(), &new_value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
			new_root
		};

		// Check on subsequent reads all reads hit the shared cache.
		let stats =
			read_to_check_cache(&shared_cache, &mut db, root, &random_keys, new_value.clone());

		assert_eq!(stats.value_cache.shared_fetch_attempts, stats.value_cache.shared_hits);
		assert_eq!(stats.node_cache.shared_fetch_attempts, stats.node_cache.shared_hits);

		assert_eq!(stats.value_cache.shared_fetch_attempts, stats.value_cache.local_fetch_attempts);
		assert_eq!(stats.node_cache.shared_fetch_attempts, stats.node_cache.local_fetch_attempts);

		// The length of the shared value cache should contain everything that existed before + all
		// keys that got updated with a trusted cache.
		assert_eq!(
			shared_cache.read_lock_inner().value_cache().lru.len(),
			shared_value_cache_len + num_test_keys
		);
	}

	// Helper function to read from the trie.
	//
	// Reads every key in `keys` through a fresh untrusted local cache, asserting each
	// value equals `expected_value`. Returns the cache stats.
	fn read_to_check_cache(
		shared_cache: &Cache,
		db: &mut MemoryDB,
		root: H256,
		keys: &Vec<Vec<u8>>,
		expected_value: Vec<u8>,
	) -> TrieHitStatsSnapshot {
		let local_cache = shared_cache.local_cache_untrusted();
		let mut cache = local_cache.as_trie_db_cache(root);
		let trie = TrieDBBuilder::<Layout>::new(db, &root).with_cache(&mut cache).build();

		for key in keys.iter() {
			assert_eq!(trie.get(key.as_ref()).unwrap().unwrap(), expected_value);
		}
		local_cache.stats.snapshot()
	}
}