1use crate::{Error, NodeCodec};
38use hash_db::Hasher;
39use metrics::{HitStatsSnapshot, TrieHitStatsSnapshot};
40use nohash_hasher::BuildNoHashHasher;
41use parking_lot::{Mutex, MutexGuard, RwLockWriteGuard};
42use schnellru::LruMap;
43use shared_cache::{ValueCacheKey, ValueCacheRef};
44use std::{
45 collections::HashMap,
46 sync::{
47 atomic::{AtomicU64, Ordering},
48 Arc,
49 },
50 time::Duration,
51};
52use trie_db::{node::NodeOwned, CachedValue};
53
54mod metrics;
55mod shared_cache;
56
57pub use shared_cache::SharedTrieCache;
58
59use self::shared_cache::ValueCacheKeyHash;
60
/// The logging target used throughout this module.
const LOG_TARGET: &str = "trie-cache";

/// How long to wait for the shared cache's write lock (used e.g. when a
/// [`LocalTrieCache`] is dropped and wants to flush its data). On timeout the
/// local data is discarded instead of blocking the caller.
const SHARED_CACHE_WRITE_LOCK_TIMEOUT: Duration = Duration::from_millis(100);

/// Upper bound on the number of already-present keys an untrusted local cache
/// may "promote" (touch) in the shared node cache when it is flushed.
/// NOTE(review): exact semantics live in the shared node cache's `update` — confirm there.
const SHARED_NODE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792;
/// Same as [`SHARED_NODE_CACHE_MAX_PROMOTED_KEYS`], but for the shared value cache.
const SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792;

/// Maximum percentage of the shared node cache an untrusted flush may replace.
/// NOTE(review): interpretation of the percentage happens in the shared cache — confirm there.
const SHARED_NODE_CACHE_MAX_REPLACE_PERCENT: usize = 33;
/// Same as [`SHARED_NODE_CACHE_MAX_REPLACE_PERCENT`], but for the shared value cache.
const SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT: usize = 33;

/// Default cap on the inline (LRU-map bookkeeping) memory of a local node cache.
const LOCAL_NODE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024;
/// Default cap on the inline (LRU-map bookkeeping) memory of a local value cache.
const LOCAL_VALUE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024;

/// Default cap on the heap memory accounted for a local node cache.
const LOCAL_NODE_CACHE_MAX_HEAP_SIZE: usize = 8 * 1024 * 1024;
/// Default cap on the heap memory accounted for a local value cache.
const LOCAL_VALUE_CACHE_MAX_HEAP_SIZE: usize = 2 * 1024 * 1024;
/// The byte size to which a cache is limited.
#[derive(Debug, Clone, Copy)]
pub struct CacheSize(usize);

impl CacheSize {
	/// Returns an effectively unlimited cache size (the largest representable
	/// number of bytes).
	pub const fn unlimited() -> Self {
		Self(usize::MAX)
	}

	/// Returns a cache size limited to `bytes` bytes.
	pub const fn new(bytes: usize) -> Self {
		Self(bytes)
	}
}
119
120pub struct LocalNodeCacheLimiter {
121 current_heap_size: usize,
125 config: LocalNodeCacheConfig,
126}
127
128impl LocalNodeCacheLimiter {
129 pub fn new(config: LocalNodeCacheConfig) -> Self {
131 Self { config, current_heap_size: 0 }
132 }
133}
134
// Tracks the heap usage of cached nodes so the LRU map can enforce both the
// heap limit (`is_over_the_limit`) and the inline map-memory limit (`on_grow`).
impl<H> schnellru::Limiter<H, NodeCached<H>> for LocalNodeCacheLimiter
where
	H: AsRef<[u8]> + std::fmt::Debug,
{
	type KeyToInsert<'a> = H;
	type LinkType = u32;

	#[inline]
	fn is_over_the_limit(&self, length: usize) -> bool {
		// Always keep room for at least one element so insertion can make
		// progress even when a single node exceeds the heap limit.
		if length <= 1 {
			return false;
		}

		self.current_heap_size > self.config.local_node_cache_max_heap_size
	}

	#[inline]
	fn on_insert<'a>(
		&mut self,
		_length: usize,
		key: H,
		cached_node: NodeCached<H>,
	) -> Option<(H, NodeCached<H>)> {
		// Account for the new node's heap allocations; insertions themselves
		// are never rejected here.
		self.current_heap_size += cached_node.heap_size();
		Some((key, cached_node))
	}

	#[inline]
	fn on_replace(
		&mut self,
		_length: usize,
		_old_key: &mut H,
		_new_key: H,
		old_node: &mut NodeCached<H>,
		new_node: &mut NodeCached<H>,
	) -> bool {
		// Keys are expected to be equal-length (hashes), so only the node
		// sizes contribute to the accounting delta.
		debug_assert_eq!(_old_key.as_ref().len(), _new_key.as_ref().len());
		// Add before subtracting to avoid any intermediate underflow.
		self.current_heap_size =
			self.current_heap_size + new_node.heap_size() - old_node.heap_size();
		true
	}

	#[inline]
	fn on_removed(&mut self, _key: &mut H, cached_node: &mut NodeCached<H>) {
		// Give back the removed node's accounted heap bytes.
		self.current_heap_size -= cached_node.heap_size();
	}

	#[inline]
	fn on_cleared(&mut self) {
		// All entries are gone, so nothing is accounted anymore.
		self.current_heap_size = 0;
	}

	#[inline]
	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
		// Refuse to let the LRU map's own (inline) memory grow past the limit.
		new_memory_usage <= self.config.local_node_cache_max_inline_size
	}
}
194
195pub struct LocalValueCacheLimiter {
197 current_heap_size: usize,
201
202 config: LocalValueCacheConfig,
203}
204
205impl LocalValueCacheLimiter {
206 pub fn new(config: LocalValueCacheConfig) -> Self {
208 Self { config, current_heap_size: 0 }
209 }
210}
211
// Tracks the heap usage of the stored storage keys (the only per-entry heap
// allocation accounted here) so the LRU map can enforce the configured limits.
impl<H> schnellru::Limiter<ValueCacheKey<H>, CachedValue<H>> for LocalValueCacheLimiter
where
	H: AsRef<[u8]>,
{
	type KeyToInsert<'a> = ValueCacheRef<'a, H>;
	type LinkType = u32;

	#[inline]
	fn is_over_the_limit(&self, length: usize) -> bool {
		// Always keep room for at least one element so insertion can make
		// progress even when a single entry exceeds the heap limit.
		if length <= 1 {
			return false;
		}

		self.current_heap_size > self.config.local_value_cache_max_heap_size
	}

	#[inline]
	fn on_insert(
		&mut self,
		_length: usize,
		key: Self::KeyToInsert<'_>,
		value: CachedValue<H>,
	) -> Option<(ValueCacheKey<H>, CachedValue<H>)> {
		// Only the storage key's length is accounted as heap usage.
		self.current_heap_size += key.storage_key.len();
		Some((key.into(), value))
	}

	#[inline]
	fn on_replace(
		&mut self,
		_length: usize,
		_old_key: &mut ValueCacheKey<H>,
		_new_key: ValueCacheRef<H>,
		_old_value: &mut CachedValue<H>,
		_new_value: &mut CachedValue<H>,
	) -> bool {
		// Replacement only happens for identical keys, so the accounted
		// storage-key bytes stay unchanged (guarded by the debug assert).
		debug_assert_eq!(_old_key.storage_key.len(), _new_key.storage_key.len());
		true
	}

	#[inline]
	fn on_removed(&mut self, key: &mut ValueCacheKey<H>, _: &mut CachedValue<H>) {
		// Give back the removed entry's accounted storage-key bytes.
		self.current_heap_size -= key.storage_key.len();
	}

	#[inline]
	fn on_cleared(&mut self) {
		// All entries are gone, so nothing is accounted anymore.
		self.current_heap_size = 0;
	}

	#[inline]
	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
		// Refuse to let the LRU map's own (inline) memory grow past the limit.
		new_memory_usage <= self.config.local_value_cache_max_inline_size
	}
}
269
/// Hit/miss counters for one cache, split into the shared and local layers.
#[derive(Default)]
struct HitStats {
	// Successful lookups in the shared cache.
	shared_hits: AtomicU64,
	// Lookup attempts against the shared cache.
	shared_fetch_attempts: AtomicU64,
	// Successful lookups in the local cache.
	local_hits: AtomicU64,
	// Lookup attempts against the local cache.
	local_fetch_attempts: AtomicU64,
}
278
279impl HitStats {
280 fn snapshot(&self) -> HitStatsSnapshot {
282 HitStatsSnapshot {
283 shared_hits: self.shared_hits.load(Ordering::Relaxed),
284 shared_fetch_attempts: self.shared_fetch_attempts.load(Ordering::Relaxed),
285 local_hits: self.local_hits.load(Ordering::Relaxed),
286 local_fetch_attempts: self.local_fetch_attempts.load(Ordering::Relaxed),
287 }
288 }
289}
290
291impl std::fmt::Display for HitStats {
292 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
293 let snapshot = self.snapshot();
294 write!(f, "{}", snapshot)
295 }
296}
297
/// Hit/miss counters for both caches of a [`LocalTrieCache`].
#[derive(Default)]
struct TrieHitStats {
	// Counters for the trie node cache.
	node_cache: HitStats,
	// Counters for the value cache.
	value_cache: HitStats,
}
304
305impl TrieHitStats {
306 fn snapshot(&self) -> TrieHitStatsSnapshot {
308 TrieHitStatsSnapshot {
309 node_cache: self.node_cache.snapshot(),
310 value_cache: self.value_cache.snapshot(),
311 }
312 }
313
314 fn add_snapshot(&self, other: &TrieHitStatsSnapshot) {
316 self.node_cache
317 .local_fetch_attempts
318 .fetch_add(other.node_cache.local_fetch_attempts, Ordering::Relaxed);
319
320 self.node_cache
321 .shared_fetch_attempts
322 .fetch_add(other.node_cache.shared_fetch_attempts, Ordering::Relaxed);
323
324 self.node_cache
325 .local_hits
326 .fetch_add(other.node_cache.local_hits, Ordering::Relaxed);
327
328 self.node_cache
329 .shared_hits
330 .fetch_add(other.node_cache.shared_hits, Ordering::Relaxed);
331
332 self.value_cache
333 .local_fetch_attempts
334 .fetch_add(other.value_cache.local_fetch_attempts, Ordering::Relaxed);
335
336 self.value_cache
337 .shared_fetch_attempts
338 .fetch_add(other.value_cache.shared_fetch_attempts, Ordering::Relaxed);
339
340 self.value_cache
341 .local_hits
342 .fetch_add(other.value_cache.local_hits, Ordering::Relaxed);
343
344 self.value_cache
345 .shared_hits
346 .fetch_add(other.value_cache.shared_hits, Ordering::Relaxed);
347 }
348}
349
/// A cached trie node together with where it came from.
pub(crate) struct NodeCached<H> {
	// The cached node itself.
	pub node: NodeOwned<H>,
	// `true` when the node was taken from the shared cache, `false` when it
	// came from the database fetch.
	pub is_from_shared_cache: bool,
}

impl<H> NodeCached<H> {
	// Heap bytes used by this node: total size minus the inline size of the
	// `NodeOwned` struct itself.
	// NOTE(review): assumes `size_in_bytes` includes the inline struct size —
	// confirm against `NodeOwned::size_in_bytes`, otherwise this can underflow.
	fn heap_size(&self) -> usize {
		self.node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>()
	}
}
364
/// The LRU map backing a local node cache.
type NodeCacheMap<H> = LruMap<H, NodeCached<H>, LocalNodeCacheLimiter, schnellru::RandomState>;

/// The LRU map backing a local value cache.
///
/// Uses a no-op hasher because the keys already carry a precomputed hash
/// (see [`ValueCacheKeyHash`] and the `hash` field of [`ValueCacheRef`]).
type ValueCacheMap<H> = LruMap<
	ValueCacheKey<H>,
	CachedValue<H>,
	LocalValueCacheLimiter,
	BuildNoHashHasher<ValueCacheKey<H>>,
>;

/// Length-bounded LRU set recording which shared value cache entries were
/// accessed through a local cache (reported back to the shared cache on drop).
type ValueAccessSet =
	LruMap<ValueCacheKeyHash, (), schnellru::ByLength, BuildNoHashHasher<ValueCacheKeyHash>>;
376
/// Size limits for a local value cache and for flushing it into the shared one.
#[derive(Clone, Copy)]
pub struct LocalValueCacheConfig {
	// Cap on the accounted heap bytes (storage-key lengths) of the local cache.
	local_value_cache_max_heap_size: usize,
	// Cap on the LRU map's own (inline) memory usage.
	local_value_cache_max_inline_size: usize,
	// Limits applied when flushing into the shared value cache.
	// NOTE(review): exact semantics live in the shared cache's `update` — confirm there.
	shared_value_cache_max_promoted_keys: u32,
	shared_value_cache_max_replace_percent: usize,
}
388
/// Size limits for a local node cache and for flushing it into the shared one.
#[derive(Clone, Copy)]
pub struct LocalNodeCacheConfig {
	// Cap on the accounted heap bytes of the local cache.
	local_node_cache_max_heap_size: usize,
	// Cap on the LRU map's own (inline) memory usage.
	local_node_cache_max_inline_size: usize,
	// Limits applied when flushing into the shared node cache.
	// NOTE(review): exact semantics live in the shared cache's `update` — confirm there.
	shared_node_cache_max_promoted_keys: u32,
	shared_node_cache_max_replace_percent: usize,
}
401
402impl LocalNodeCacheConfig {
403 fn trusted(
412 local_node_cache_max_heap_size: usize,
413 local_node_cache_max_inline_size: usize,
414 ) -> Self {
415 LocalNodeCacheConfig {
416 local_node_cache_max_heap_size: std::cmp::max(
417 local_node_cache_max_heap_size,
418 LOCAL_NODE_CACHE_MAX_HEAP_SIZE,
419 ),
420 local_node_cache_max_inline_size: std::cmp::max(
421 local_node_cache_max_inline_size,
422 LOCAL_NODE_CACHE_MAX_INLINE_SIZE,
423 ),
424 shared_node_cache_max_promoted_keys: u32::MAX,
425 shared_node_cache_max_replace_percent: 100,
426 }
427 }
428
429 fn untrusted() -> Self {
434 LocalNodeCacheConfig {
435 local_node_cache_max_inline_size: LOCAL_NODE_CACHE_MAX_INLINE_SIZE,
436 local_node_cache_max_heap_size: LOCAL_NODE_CACHE_MAX_HEAP_SIZE,
437 shared_node_cache_max_promoted_keys: SHARED_NODE_CACHE_MAX_PROMOTED_KEYS,
438 shared_node_cache_max_replace_percent: SHARED_NODE_CACHE_MAX_REPLACE_PERCENT,
439 }
440 }
441}
442
443impl LocalValueCacheConfig {
444 fn trusted(
453 local_value_cache_max_heap_size: usize,
454 local_value_cache_max_inline_size: usize,
455 ) -> Self {
456 LocalValueCacheConfig {
457 shared_value_cache_max_promoted_keys: u32::MAX,
458 shared_value_cache_max_replace_percent: 100,
459 local_value_cache_max_inline_size: std::cmp::max(
460 local_value_cache_max_inline_size,
461 LOCAL_VALUE_CACHE_MAX_INLINE_SIZE,
462 ),
463 local_value_cache_max_heap_size: std::cmp::max(
464 local_value_cache_max_heap_size,
465 LOCAL_VALUE_CACHE_MAX_HEAP_SIZE,
466 ),
467 }
468 }
469
470 fn untrusted() -> Self {
475 LocalValueCacheConfig {
476 local_value_cache_max_inline_size: LOCAL_VALUE_CACHE_MAX_INLINE_SIZE,
477 local_value_cache_max_heap_size: LOCAL_VALUE_CACHE_MAX_HEAP_SIZE,
478 shared_value_cache_max_promoted_keys: SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS,
479 shared_value_cache_max_replace_percent: SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT,
480 }
481 }
482}
483
/// A local trie cache that collects nodes and values during use and flushes
/// them into its [`SharedTrieCache`] when dropped.
pub struct LocalTrieCache<H: Hasher> {
	// The shared cache this local cache flushes into on drop.
	shared: SharedTrieCache<H>,

	// Locally cached trie nodes.
	node_cache: Mutex<NodeCacheMap<H::Out>>,

	// Locally cached values.
	value_cache: Mutex<ValueCacheMap<H::Out>>,

	// Shared value cache entries that were accessed through this local cache;
	// their keys are reported back to the shared cache on drop.
	shared_value_cache_access: Mutex<ValueAccessSet>,
	// Limits used when flushing the value cache into the shared cache.
	value_cache_config: LocalValueCacheConfig,
	// Limits used when flushing the node cache into the shared cache.
	node_cache_config: LocalNodeCacheConfig,
	// Hit/miss statistics accumulated while this cache is in use.
	stats: TrieHitStats,
	// Whether this cache was created as "trusted"; affects whether the shared
	// write lock is bumped between the two flush steps in `drop`.
	trusted: bool,
}
522
impl<H: Hasher> LocalTrieCache<H> {
	/// Returns a cache usable with a `trie_db` read-only trie for the given
	/// `storage_root`.
	///
	/// Locks the value cache, the shared-access set and the node cache (in
	/// that order) for the lifetime of the returned [`TrieCache`].
	pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H> {
		let value_cache = ValueCache::ForStorageRoot {
			storage_root,
			local_value_cache: self.value_cache.lock(),
			shared_value_cache_access: self.shared_value_cache_access.lock(),
			buffered_value: None,
		};

		TrieCache {
			shared_cache: self.shared.clone(),
			local_cache: self.node_cache.lock(),
			value_cache,
			stats: &self.stats,
		}
	}

	/// Returns a cache usable with a `trie_db` mutable trie.
	///
	/// Starts with a fresh, empty value cache (the final storage root is not
	/// known yet); call [`TrieCache::merge_into`] afterwards to move the
	/// freshly cached values into this local cache.
	pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H> {
		TrieCache {
			shared_cache: self.shared.clone(),
			local_cache: self.node_cache.lock(),
			value_cache: ValueCache::Fresh(Default::default()),
			stats: &self.stats,
		}
	}
}
559
// On drop, the local cache's contents and statistics are flushed into the
// shared cache. If the shared write lock cannot be acquired within the
// timeout, everything local is silently discarded.
impl<H: Hasher> Drop for LocalTrieCache<H> {
	fn drop(&mut self) {
		tracing::debug!(
			target: LOG_TARGET,
			"Local node trie cache dropped: {}",
			self.stats.node_cache
		);

		tracing::debug!(
			target: LOG_TARGET,
			"Local value trie cache dropped: {}",
			self.stats.value_cache
		);

		// Timed lock: never block this thread indefinitely on a busy shared
		// cache; on timeout the local contents are dropped unflushed.
		let mut shared_inner = match self.shared.write_lock_inner() {
			Some(inner) => inner,
			None => {
				tracing::warn!(
					target: LOG_TARGET,
					"Timeout while trying to acquire a write lock for the shared trie cache"
				);
				return;
			},
		};
		// Fold the local hit statistics into the shared cache and report them
		// to the metrics backend, if one is registered.
		let stats_snapshot = self.stats.snapshot();
		shared_inner.stats_add_snapshot(&stats_snapshot);
		let metrics = shared_inner.metrics().cloned();
		metrics.as_ref().map(|metrics| metrics.observe_hits_stats(&stats_snapshot));
		// Flush all locally cached nodes into the shared node cache.
		{
			let _node_update_duration =
				metrics.as_ref().map(|metrics| metrics.start_shared_node_update_timer());
			let node_cache = self.node_cache.get_mut();

			metrics
				.as_ref()
				.map(|metrics| metrics.observe_local_node_cache_length(node_cache.len()));

			shared_inner.node_cache_mut().update(
				node_cache.drain(),
				&self.node_cache_config,
				&metrics,
			);
		}

		if !self.trusted {
			// For untrusted caches, briefly yield the exclusive lock between
			// the two flush steps so other waiting threads can make progress.
			RwLockWriteGuard::bump(&mut shared_inner);
		}

		// Flush the locally cached values — together with the set of shared
		// entries that were accessed — into the shared value cache.
		{
			let _node_update_duration =
				metrics.as_ref().map(|metrics| metrics.start_shared_value_update_timer());
			let value_cache = self.shared_value_cache_access.get_mut();
			metrics
				.as_ref()
				.map(|metrics| metrics.observe_local_value_cache_length(value_cache.len()));

			shared_inner.value_cache_mut().update(
				self.value_cache.get_mut().drain(),
				value_cache.drain().map(|(key, ())| key),
				&self.value_cache_config,
				&metrics,
			);
		}
	}
}
628
/// The value cache view used by a [`TrieCache`].
enum ValueCache<'a, H: Hasher> {
	/// A fresh, storage-root-less cache, used while mutating a trie (the final
	/// root is only known afterwards); see [`LocalTrieCache::as_trie_db_mut_cache`].
	Fresh(HashMap<Arc<[u8]>, CachedValue<H::Out>>),
	/// A cache bound to one concrete storage root.
	ForStorageRoot {
		// Records which shared value cache entries were accessed.
		shared_value_cache_access: MutexGuard<'a, ValueAccessSet>,
		// The borrowed local value cache.
		local_value_cache: MutexGuard<'a, ValueCacheMap<H::Out>>,
		// The storage root all lookups are scoped to.
		storage_root: H::Out,
		// Holds a value cloned out of the shared cache so `get` can hand out a
		// reference with the right lifetime.
		buffered_value: Option<CachedValue<H::Out>>,
	},
}
645
impl<H: Hasher> ValueCache<'_, H> {
	/// Looks up the cached value for `key`.
	///
	/// A `Fresh` cache only consults its own map. A `ForStorageRoot` cache
	/// first checks the local value cache and then falls back to the shared
	/// cache. The given `stats` are updated for every layer consulted.
	fn get(
		&mut self,
		key: &[u8],
		shared_cache: &SharedTrieCache<H>,
		stats: &HitStats,
	) -> Option<&CachedValue<H::Out>> {
		stats.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		match self {
			Self::Fresh(map) => {
				if let Some(value) = map.get(key) {
					stats.local_hits.fetch_add(1, Ordering::Relaxed);
					Some(value)
				} else {
					None
				}
			},
			Self::ForStorageRoot {
				local_value_cache,
				shared_value_cache_access,
				storage_root,
				buffered_value,
			} => {
				// Compute the key hash once and reuse it for both lookups.
				let hash = ValueCacheKey::hash_data(key, storage_root);

				// `peek_by_hash` avoids building an owned `ValueCacheKey` just
				// for the lookup; the closure verifies the exact key match.
				if let Some(value) = local_value_cache
					.peek_by_hash(hash.raw(), |existing_key, _| {
						existing_key.is_eq(storage_root, key)
					}) {
					stats.local_hits.fetch_add(1, Ordering::Relaxed);

					return Some(value);
				}

				stats.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
				if let Some(value) = shared_cache.peek_value_by_hash(hash, storage_root, key) {
					stats.shared_hits.fetch_add(1, Ordering::Relaxed);
					// Remember that this shared entry was used, so it can be
					// reported back to the shared cache later.
					shared_value_cache_access.insert(hash, ());
					// Park the clone in `buffered_value` so a reference tied
					// to `self` can be returned.
					*buffered_value = Some(value.clone());
					return buffered_value.as_ref();
				}

				None
			},
		}
	}

	/// Inserts `value` for `key` into this cache.
	fn insert(&mut self, key: &[u8], value: CachedValue<H::Out>) {
		match self {
			Self::Fresh(map) => {
				map.insert(key.into(), value);
			},
			Self::ForStorageRoot { local_value_cache, storage_root, .. } => {
				local_value_cache.insert(ValueCacheRef::new(key, *storage_root), value);
			},
		}
	}
}
715
/// The cache implementation handed to `trie_db`.
///
/// Borrows the locked maps of a [`LocalTrieCache`] for the duration of one
/// trie operation; created via [`LocalTrieCache::as_trie_db_cache`] or
/// [`LocalTrieCache::as_trie_db_mut_cache`].
pub struct TrieCache<'a, H: Hasher> {
	// Handle to the shared cache used as a second-level lookup.
	shared_cache: SharedTrieCache<H>,
	// The locked local node cache.
	local_cache: MutexGuard<'a, NodeCacheMap<H::Out>>,
	// The value cache view (fresh or bound to a storage root).
	value_cache: ValueCache<'a, H>,
	// Hit/miss statistics of the owning local cache.
	stats: &'a TrieHitStats,
}
727
728impl<'a, H: Hasher> TrieCache<'a, H> {
729 pub fn merge_into(self, local: &LocalTrieCache<H>, storage_root: H::Out) {
736 let ValueCache::Fresh(cache) = self.value_cache else { return };
737
738 if !cache.is_empty() {
739 let mut value_cache = local.value_cache.lock();
740 let partial_hash = ValueCacheKey::hash_partial_data(&storage_root);
741 cache.into_iter().for_each(|(k, v)| {
742 let hash = ValueCacheKeyHash::from_hasher_and_storage_key(partial_hash.clone(), &k);
743 let k = ValueCacheRef { storage_root, storage_key: &k, hash };
744 value_cache.insert(k, v);
745 });
746 }
747 }
748}
749
impl<'a, H: Hasher> trie_db::TrieCache<NodeCodec<H>> for TrieCache<'a, H> {
	/// Returns the node for `hash`, consulting the local cache, then the
	/// shared cache, and finally the database via `fetch_node`.
	///
	/// Hit statistics are updated for every layer that is consulted.
	fn get_or_insert_node(
		&mut self,
		hash: H::Out,
		fetch_node: &mut dyn FnMut() -> trie_db::Result<NodeOwned<H::Out>, H::Out, Error<H::Out>>,
	) -> trie_db::Result<&NodeOwned<H::Out>, H::Out, Error<H::Out>> {
		let mut is_local_cache_hit = true;
		self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		// The closure only runs on a local cache miss.
		let node = self.local_cache.get_or_insert_fallible(hash, || {
			is_local_cache_hit = false;

			self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
			if let Some(node) = self.shared_cache.peek_node(&hash) {
				self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed);
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache");

				// Record the origin so later consumers can tell a shared-cache
				// copy apart from a freshly fetched node.
				return Ok(NodeCached::<H::Out> { node: node.clone(), is_from_shared_cache: true });
			}

			match fetch_node() {
				Ok(node) => {
					tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database");
					Ok(NodeCached::<H::Out> { node, is_from_shared_cache: false })
				},
				Err(error) => {
					tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database failed");
					Err(error)
				},
			}
		});

		if is_local_cache_hit {
			tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache");
			self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed);
		}

		// `None` would mean the local cache refused the insertion, which its
		// limiter never does for the first element.
		Ok(&node?
			.expect("you can always insert at least one element into the local cache; qed")
			.node)
	}

	/// Returns the node for `hash` if it is in the local or shared cache; does
	/// NOT fall back to the database.
	fn get_node(&mut self, hash: &H::Out) -> Option<&NodeOwned<H::Out>> {
		let mut is_local_cache_hit = true;
		self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		// The closure only runs on a local cache miss; `Err(())` signals
		// "not cached anywhere".
		let cached_node = self.local_cache.get_or_insert_fallible(*hash, || {
			is_local_cache_hit = false;

			self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
			if let Some(node) = self.shared_cache.peek_node(&hash) {
				self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed);
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache");

				Ok(NodeCached::<H::Out> { node: node.clone(), is_from_shared_cache: true })
			} else {
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from cache failed");

				Err(())
			}
		});

		if is_local_cache_hit {
			tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache");
			self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed);
		}

		match cached_node {
			Ok(Some(cached_node)) => Some(&cached_node.node),
			Ok(None) => {
				unreachable!(
					"you can always insert at least one element into the local cache; qed"
				);
			},
			Err(()) => None,
		}
	}

	/// Looks up the cached value for `key` via the value cache view.
	fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue<H::Out>> {
		let res = self.value_cache.get(key, &self.shared_cache, &self.stats.value_cache);

		tracing::trace!(
			target: LOG_TARGET,
			key = ?sp_core::hexdisplay::HexDisplay::from(&key),
			found = res.is_some(),
			"Looked up value for key",
		);

		res
	}

	/// Caches `data` as the value for `key`.
	fn cache_value_for_key(&mut self, key: &[u8], data: CachedValue<H::Out>) {
		tracing::trace!(
			target: LOG_TARGET,
			key = ?sp_core::hexdisplay::HexDisplay::from(&key),
			"Caching value for key",
		);

		self.value_cache.insert(key, data);
	}
}
856
#[cfg(test)]
mod tests {
	use super::*;
	use rand::{thread_rng, Rng};
	use sp_core::H256;
	use trie_db::{Bytes, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut};

	type MemoryDB = crate::MemoryDB<sp_core::Blake2Hasher>;
	type Layout = crate::LayoutV1<sp_core::Blake2Hasher>;
	type Cache = super::SharedTrieCache<sp_core::Blake2Hasher>;
	type Recorder = crate::recorder::Recorder<sp_core::Blake2Hasher>;

	const TEST_DATA: &[(&[u8], &[u8])] =
		&[(b"key1", b"val1"), (b"key2", &[2; 64]), (b"key3", b"val3"), (b"key4", &[4; 64])];
	const CACHE_SIZE_RAW: usize = 1024 * 10;
	const CACHE_SIZE: CacheSize = CacheSize::new(CACHE_SIZE_RAW);

	// Builds an in-memory trie filled with `TEST_DATA` and returns it together
	// with its root.
	fn create_trie() -> (MemoryDB, TrieHash<Layout>) {
		let mut db = MemoryDB::default();
		let mut root = Default::default();

		{
			let mut trie = TrieDBMutBuilder::<Layout>::new(&mut db, &mut root).build();
			for (k, v) in TEST_DATA {
				trie.insert(k, v).expect("Inserts data");
			}
		}

		(db, root)
	}

	// Reads through a local cache and checks that node and value end up in the
	// shared cache only after the local cache is dropped, and that values
	// planted in the shared cache are served from it.
	#[test]
	fn basic_cache_works() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);
		let local_cache = shared_cache.local_cache_untrusted();

		{
			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();
			assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
		}

		// Nothing is flushed to the shared cache before the local cache drops.
		assert!(shared_cache.read_lock_inner().value_cache().lru.is_empty());
		assert!(shared_cache.read_lock_inner().node_cache().lru.is_empty());

		drop(local_cache);

		// The drop flushed the local caches into the shared cache.
		assert!(shared_cache.read_lock_inner().node_cache().lru.len() >= 1);
		let cached_data = shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.peek(&ValueCacheKey::new_value(TEST_DATA[0].0, root))
			.unwrap()
			.clone();
		assert_eq!(Bytes::from(TEST_DATA[0].1.to_vec()), cached_data.data().flatten().unwrap());

		let fake_data = Bytes::from(&b"fake_data"[..]);

		let local_cache = shared_cache.local_cache_untrusted();
		shared_cache.write_lock_inner().unwrap().value_cache_mut().lru.insert(
			ValueCacheKey::new_value(TEST_DATA[1].0, root),
			(fake_data.clone(), Default::default()).into(),
		);

		{
			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			// The planted fake value proves the lookup was served by the cache.
			assert_eq!(b"fake_data".to_vec(), trie.get(TEST_DATA[1].0).unwrap().unwrap());
		}
	}

	// Inserts through a mutable-trie cache and checks that `merge_into`
	// followed by the drop makes the new value visible in the shared cache.
	#[test]
	fn trie_db_mut_cache_works() {
		let (mut db, root) = create_trie();

		let new_key = b"new_key".to_vec();
		let new_value = vec![23; 64];

		let shared_cache = Cache::new(CACHE_SIZE, None);
		let mut new_root = root;

		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut cache = local_cache.as_trie_db_mut_cache();

			{
				let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
					.with_cache(&mut cache)
					.build();

				trie.insert(&new_key, &new_value).unwrap();
			}

			cache.merge_into(&local_cache, new_root);
		}

		let cached_data = shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.peek(&ValueCacheKey::new_value(new_key, new_root))
			.unwrap()
			.clone();
		assert_eq!(Bytes::from(new_value), cached_data.data().flatten().unwrap());
	}

	// Reads with cache + recorder attached and checks the recorded proof is
	// sufficient on its own, also after resetting the shared caches mid-way.
	#[test]
	fn trie_db_cache_and_recorder_work_together() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);

		for i in 0..5 {
			// Exercise partially warm caches as well.
			if i == 2 {
				shared_cache.reset_node_cache();
			} else if i == 3 {
				shared_cache.reset_value_cache();
			}

			let local_cache = shared_cache.local_cache_untrusted();
			let recorder = Recorder::default();

			{
				let mut cache = local_cache.as_trie_db_cache(root);
				let mut recorder = recorder.as_trie_recorder(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root)
					.with_cache(&mut cache)
					.with_recorder(&mut recorder)
					.build();

				for (key, value) in TEST_DATA {
					assert_eq!(*value, trie.get(&key).unwrap().unwrap());
				}
			}

			let storage_proof = recorder.drain_storage_proof();
			let memory_db: MemoryDB = storage_proof.into_memory_db();

			{
				// The proof alone must be enough to read all the data back.
				let trie = TrieDBBuilder::<Layout>::new(&memory_db, &root).build();

				for (key, value) in TEST_DATA {
					assert_eq!(*value, trie.get(&key).unwrap().unwrap());
				}
			}
		}
	}

	// Mutates with cache + recorder attached and checks the recorded proof
	// reproduces the same new root when the mutations are replayed on it.
	#[test]
	fn trie_db_mut_cache_and_recorder_work_together() {
		const DATA_TO_ADD: &[(&[u8], &[u8])] = &[(b"key11", &[45; 78]), (b"key33", &[78; 89])];

		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);

		for i in 0..5 {
			// Exercise partially warm caches as well.
			if i == 2 {
				shared_cache.reset_node_cache();
			} else if i == 3 {
				shared_cache.reset_value_cache();
			}

			let recorder = Recorder::default();
			let local_cache = shared_cache.local_cache_untrusted();
			let mut new_root = root;

			{
				let mut db = db.clone();
				let mut cache = local_cache.as_trie_db_cache(root);
				let mut recorder = recorder.as_trie_recorder(root);
				let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
					.with_cache(&mut cache)
					.with_recorder(&mut recorder)
					.build();

				for (key, value) in DATA_TO_ADD {
					trie.insert(key, value).unwrap();
				}
			}

			let storage_proof = recorder.drain_storage_proof();
			let mut memory_db: MemoryDB = storage_proof.into_memory_db();
			let mut proof_root = root;

			{
				// Replay the same mutations on the proof database.
				let mut trie =
					TrieDBMutBuilder::<Layout>::from_existing(&mut memory_db, &mut proof_root)
						.build();

				for (key, value) in DATA_TO_ADD {
					trie.insert(key, value).unwrap();
				}
			}

			assert_eq!(new_root, proof_root)
		}
	}

	// Checks that the shared caches behave like LRUs: accessed entries are
	// retained and access order changes the eviction order.
	#[test]
	fn cache_lru_works() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);

		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			for (k, _) in TEST_DATA {
				trie.get(k).unwrap().unwrap();
			}
		}

		// All test keys made it into the shared value cache.
		assert!(shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.iter()
			.map(|d| d.0)
			.all(|l| TEST_DATA.iter().any(|d| &*l.storage_key == d.0)));

		for _ in 0..2 {
			{
				let local_cache = shared_cache.local_cache_untrusted();

				let mut cache = local_cache.as_trie_db_cache(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

				for (k, _) in TEST_DATA.iter().take(2) {
					trie.get(k).unwrap().unwrap();
				}
			}

			// The two accessed keys are now the most recently used ones.
			assert!(shared_cache
				.read_lock_inner()
				.value_cache()
				.lru
				.iter()
				.take(2)
				.map(|d| d.0)
				.all(|l| { TEST_DATA.iter().take(2).any(|d| &*l.storage_key == d.0) }));

			shared_cache.reset_value_cache();
		}

		let most_recently_used_nodes = shared_cache
			.read_lock_inner()
			.node_cache()
			.lru
			.iter()
			.map(|d| *d.0)
			.collect::<Vec<_>>();

		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			for (k, _) in TEST_DATA.iter().skip(2) {
				trie.get(k).unwrap().unwrap();
			}
		}

		// Accessing different keys must change the node LRU order.
		assert_ne!(
			most_recently_used_nodes,
			shared_cache
				.read_lock_inner()
				.node_cache()
				.lru
				.iter()
				.map(|d| *d.0)
				.collect::<Vec<_>>()
		);
	}

	// Inserts roughly twice the configured cache size worth of data and checks
	// the shared cache stays below its configured limit.
	#[test]
	fn cache_respects_bounds() {
		let (mut db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);
		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_cache(root);
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					let value = vec![10u8; 100];
					for i in 0..CACHE_SIZE_RAW / 100 * 2 {
						trie.insert(format!("key{}", i).as_bytes(), &value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
		}

		assert!(shared_cache.used_memory_size() < CACHE_SIZE_RAW);
	}

	// Compares untrusted and trusted local caches: the untrusted flush is
	// limited in how much it may place in the shared cache, while the trusted
	// flush makes everything available and turns all lookups into hits.
	#[test]
	fn test_trusted_works() {
		let (mut db, root) = create_trie();
		let cache_size = CacheSize::new(1024 * 1024 * 1024);
		let num_test_keys: usize = 40000;
		let shared_cache = Cache::new(cache_size, None);

		let mut rng = thread_rng();
		let random_keys: Vec<Vec<u8>> =
			(0..num_test_keys).map(|_| (0..100).map(|_| rng.gen()).collect()).collect();

		let value = vec![10u8; 100];

		// Populate via an UNTRUSTED cache first.
		let root = {
			let local_cache = shared_cache.local_cache_untrusted();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_mut_cache();
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					for key in random_keys.iter() {
						trie.insert(key.as_ref(), &value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
			new_root
		};
		// The untrusted flush only placed a limited number of values.
		let shared_value_cache_len = shared_cache.read_lock_inner().value_cache().lru.len();
		assert!(shared_value_cache_len < num_test_keys / 10);

		let stats = read_to_check_cache(&shared_cache, &mut db, root, &random_keys, value.clone());
		assert_eq!(stats.value_cache.shared_hits, shared_value_cache_len as u64);

		// Some lookups must have missed the shared cache.
		assert_ne!(stats.value_cache.shared_fetch_attempts, stats.value_cache.shared_hits);
		assert_ne!(stats.node_cache.shared_fetch_attempts, stats.node_cache.shared_hits);

		let shared_value_cache_len = shared_cache.read_lock_inner().value_cache().lru.len();
		let new_value = vec![9u8; 100];
		// Now overwrite everything via a TRUSTED cache.
		let root = {
			let local_cache = shared_cache.local_cache_trusted();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_mut_cache();
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					for key in random_keys.iter() {
						trie.insert(key.as_ref(), &new_value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
			new_root
		};

		let stats =
			read_to_check_cache(&shared_cache, &mut db, root, &random_keys, new_value.clone());

		// After the trusted flush, every shared lookup is a hit.
		assert_eq!(stats.value_cache.shared_fetch_attempts, stats.value_cache.shared_hits);
		assert_eq!(stats.node_cache.shared_fetch_attempts, stats.node_cache.shared_hits);

		assert_eq!(stats.value_cache.shared_fetch_attempts, stats.value_cache.local_fetch_attempts);
		assert_eq!(stats.node_cache.shared_fetch_attempts, stats.node_cache.local_fetch_attempts);

		// The trusted flush placed one entry per key on top of what was there.
		assert_eq!(
			shared_cache.read_lock_inner().value_cache().lru.len(),
			shared_value_cache_len + num_test_keys
		);
	}

	// Reads all `keys` through a fresh untrusted local cache, asserts each
	// value, and returns the resulting hit statistics.
	fn read_to_check_cache(
		shared_cache: &Cache,
		db: &mut MemoryDB,
		root: H256,
		keys: &Vec<Vec<u8>>,
		expected_value: Vec<u8>,
	) -> TrieHitStatsSnapshot {
		let local_cache = shared_cache.local_cache_untrusted();
		let mut cache = local_cache.as_trie_db_cache(root);
		let trie = TrieDBBuilder::<Layout>::new(db, &root).with_cache(&mut cache).build();

		for key in keys.iter() {
			assert_eq!(trie.get(key.as_ref()).unwrap().unwrap(), expected_value);
		}
		local_cache.stats.snapshot()
	}
}