1use super::{DbState, DbStateBuilder};
10use hash_db::{Hasher as DbHasher, Prefix};
11use kvdb::{DBTransaction, KeyValueDB};
12use linked_hash_map::LinkedHashMap;
13use parking_lot::Mutex;
14use std::{
15 cell::{Cell, RefCell},
16 collections::HashMap,
17 sync::Arc,
18};
19use subsoil::core::{
20 hexdisplay::HexDisplay,
21 storage::{ChildInfo, TrackedStorageKey},
22};
23use subsoil::runtime::{traits::Hash, StateVersion, Storage};
24use subsoil::state_machine::{
25 backend::Backend as StateBackend, BackendTransaction, ChildStorageCollection, DBValue,
26 IterArgs, StorageCollection, StorageIterator, StorageKey, StorageValue,
27};
28use subsoil::trie::{
29 cache::{CacheSize, SharedTrieCache},
30 prefixed_key, MemoryDB, MerkleValue,
31};
32
/// Convenience alias: the benchmarking backend wraps an ordinary `DbState`.
type State<H> = DbState<H>;
34
/// Thin adapter exposing column 0 of a `KeyValueDB` as trie-node storage for
/// the state machine.
struct StorageDb<Hasher> {
    db: Arc<dyn KeyValueDB>,
    // Ties the otherwise-unused `Hasher` type parameter to the struct.
    _phantom: std::marker::PhantomData<Hasher>,
}
39
40impl<Hasher: Hash> subsoil::state_machine::Storage<Hasher> for StorageDb<Hasher> {
41 fn get(&self, key: &Hasher::Output, prefix: Prefix) -> Result<Option<DBValue>, String> {
42 let prefixed_key = prefixed_key::<Hasher>(key, prefix);
43 self.db
44 .get(0, &prefixed_key)
45 .map_err(|e| format!("Database backend error: {:?}", e))
46 }
47}
48
/// Records which storage keys have been read and written during a benchmark
/// run, so first accesses can be distinguished from repeat accesses.
struct KeyTracker {
    /// When `false`, `add_read_key`/`add_write_key` are no-ops.
    enable_tracking: bool,
    /// Main-trie keys, in first-access (insertion) order, with their counters.
    main_keys: LinkedHashMap<Vec<u8>, TrackedStorageKey>,
    /// Per-child-trie tracking: child storage key -> (key -> counters).
    child_keys: LinkedHashMap<Vec<u8>, LinkedHashMap<Vec<u8>, TrackedStorageKey>>,
}
61
/// State backend used for runtime benchmarking: an in-memory database that
/// tracks every read/write and can be wiped back to its genesis contents.
pub struct BenchmarkingState<Hasher: Hash> {
    /// Current storage root; updated on every `commit`.
    root: Cell<Hasher::Output>,
    /// Storage root of the genesis state, restored by `wipe`.
    genesis_root: Hasher::Output,
    /// The open trie state; rebuilt by `reopen`, `None` while rebuilding.
    state: RefCell<Option<State<Hasher>>>,
    /// Handle to the in-memory key-value database backing the trie.
    db: Cell<Option<Arc<dyn KeyValueDB>>>,
    /// Genesis trie nodes (key -> (value, refcount)), used by `wipe` to
    /// restore the database.
    genesis: HashMap<Vec<u8>, (Vec<u8>, i32)>,
    /// Database keys touched by commits since genesis; `wipe` resets each to
    /// its genesis value (or deletes it).
    record: Cell<Vec<Vec<u8>>>,
    /// Read/write tracker, shared with iterators spawned from this backend.
    key_tracker: Arc<Mutex<KeyTracker>>,
    /// Whitelisted keys, excluded from read/write counts.
    whitelist: RefCell<Vec<TrackedStorageKey>>,
    /// Proof recorder, present only when proof recording was requested.
    proof_recorder: Option<subsoil::trie::recorder::Recorder<Hasher>>,
    /// Root at which the proof recorder was last reset (see `reopen`).
    proof_recorder_root: Cell<Hasher::Output>,
    /// Trie node cache handed to every rebuilt state.
    shared_trie_cache: SharedTrieCache<Hasher>,
}
76
/// Raw storage iterator that forwards to the inner `DbState` iterator while
/// reporting each yielded key to the shared `KeyTracker`.
pub struct RawIter<Hasher: Hash> {
    /// The underlying state iterator performing the actual trie walk.
    inner: <DbState<Hasher> as StateBackend<Hasher>>::RawIter,
    /// `Some(child storage key)` when iterating a child trie, `None` for the
    /// main trie.
    child_trie: Option<Vec<u8>>,
    /// Tracker shared with the owning `BenchmarkingState`.
    key_tracker: Arc<Mutex<KeyTracker>>,
}
83
84impl<Hasher: Hash> StorageIterator<Hasher> for RawIter<Hasher> {
85 type Backend = BenchmarkingState<Hasher>;
86 type Error = String;
87
88 fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
89 match self.inner.next_key(backend.state.borrow().as_ref()?) {
90 Some(Ok(key)) => {
91 self.key_tracker.lock().add_read_key(self.child_trie.as_deref(), &key);
92 Some(Ok(key))
93 },
94 result => result,
95 }
96 }
97
98 fn next_pair(
99 &mut self,
100 backend: &Self::Backend,
101 ) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
102 match self.inner.next_pair(backend.state.borrow().as_ref()?) {
103 Some(Ok((key, value))) => {
104 self.key_tracker.lock().add_read_key(self.child_trie.as_deref(), &key);
105 Some(Ok((key, value)))
106 },
107 result => result,
108 }
109 }
110
111 fn was_complete(&self) -> bool {
112 self.inner.was_complete()
113 }
114}
115
impl<Hasher: Hash> BenchmarkingState<Hasher> {
    /// Create a new benchmarking state seeded with `genesis` storage.
    ///
    /// Builds an empty trie, opens an in-memory database, writes the genesis
    /// top/child storage into it, and remembers the resulting root and node
    /// set so `wipe` can later restore them.
    ///
    /// * `record_proof` — attach a proof recorder to every reopened state.
    /// * `enable_tracking` — record every read/write in the shared tracker.
    pub fn new(
        genesis: Storage,
        _cache_size_mb: Option<usize>,
        record_proof: bool,
        enable_tracking: bool,
    ) -> Result<Self, String> {
        let state_version = subsoil::runtime::StateVersion::default();
        let mut root = Default::default();
        let mut mdb = MemoryDB::<Hasher>::default();
        // Building the (empty) trie writes the empty-trie root into `root`.
        subsoil::trie::trie_types::TrieDBMutBuilderV1::<Hasher>::new(&mut mdb, &mut root).build();

        let mut state = BenchmarkingState {
            state: RefCell::new(None),
            db: Cell::new(None),
            root: Cell::new(root),
            genesis: Default::default(),
            genesis_root: Default::default(),
            record: Default::default(),
            key_tracker: Arc::new(Mutex::new(KeyTracker {
                main_keys: Default::default(),
                child_keys: Default::default(),
                enable_tracking,
            })),
            whitelist: Default::default(),
            proof_recorder: record_proof.then(Default::default),
            proof_recorder_root: Cell::new(root),
            // NOTE(review): cache deliberately sized 0 — presumably to keep
            // benchmark results independent of cache warmth; confirm.
            shared_trie_cache: SharedTrieCache::new(CacheSize::new(0)),
        };

        state.add_whitelist_to_tracker();

        state.reopen()?;
        let child_delta = genesis.children_default.values().map(|child_content| {
            (
                &child_content.child_info,
                child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))),
            )
        });
        let (root, transaction): (Hasher::Output, _) =
            state.state.borrow().as_ref().unwrap().full_storage_root(
                genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))),
                child_delta,
                state_version,
            );
        // Remember the genesis node set and root so `wipe` can restore them.
        state.genesis = transaction.clone().drain();
        state.genesis_root = root;
        state.commit(root, transaction, Vec::new(), Vec::new())?;
        // The genesis commit itself must not count towards the key record.
        state.record.take();
        Ok(state)
    }

    /// A clone of the proof recorder, if proof recording is enabled.
    pub fn recorder(&self) -> Option<subsoil::trie::recorder::Recorder<Hasher>> {
        self.proof_recorder.clone()
    }

    /// Drop and rebuild the trie state on top of the current root.
    ///
    /// Creates the in-memory database on first call, resets the proof
    /// recorder (noting the root it was reset at), and installs a fresh
    /// `DbState` using the shared trie cache.
    fn reopen(&self) -> Result<(), String> {
        *self.state.borrow_mut() = None;
        let db = match self.db.take() {
            Some(db) => db,
            None => Arc::new(kvdb_memorydb::create(1)),
        };
        self.db.set(Some(db.clone()));
        if let Some(recorder) = &self.proof_recorder {
            recorder.reset();
            self.proof_recorder_root.set(self.root.get());
        }
        let storage_db = Arc::new(StorageDb::<Hasher> { db, _phantom: Default::default() });
        *self.state.borrow_mut() = Some(
            DbStateBuilder::<Hasher>::new(storage_db, self.root.get())
                .with_optional_recorder(self.proof_recorder.clone())
                .with_cache(self.shared_trie_cache.local_cache_trusted())
                .build(),
        );
        Ok(())
    }

    /// Seed the tracker with the whitelist so those keys are never counted.
    fn add_whitelist_to_tracker(&self) {
        self.key_tracker.lock().add_whitelist(&self.whitelist.borrow());
    }

    /// Clear all tracked keys and re-apply the whitelist.
    fn wipe_tracker(&self) {
        let mut key_tracker = self.key_tracker.lock();
        key_tracker.main_keys = LinkedHashMap::new();
        key_tracker.child_keys = LinkedHashMap::new();
        key_tracker.add_whitelist(&self.whitelist.borrow());
    }

    /// Record a read of `key` (against `childtrie` if given).
    fn add_read_key(&self, childtrie: Option<&[u8]>, key: &[u8]) {
        self.key_tracker.lock().add_read_key(childtrie, key);
    }

    /// Record a write of `key` (against `childtrie` if given).
    fn add_write_key(&self, childtrie: Option<&[u8]>, key: &[u8]) {
        self.key_tracker.lock().add_write_key(childtrie, key);
    }

    /// Snapshot of every tracked key (main trie and all child tries).
    fn all_trackers(&self) -> Vec<TrackedStorageKey> {
        self.key_tracker.lock().all_trackers()
    }
}
219
220impl KeyTracker {
221 fn add_whitelist(&mut self, whitelist: &[TrackedStorageKey]) {
222 whitelist.iter().for_each(|key| {
223 let mut whitelisted = TrackedStorageKey::new(key.key.clone());
224 whitelisted.whitelist();
225 self.main_keys.insert(key.key.clone(), whitelisted);
226 });
227 }
228
229 fn add_read_key(&mut self, childtrie: Option<&[u8]>, key: &[u8]) {
231 if !self.enable_tracking {
232 return;
233 }
234
235 let child_key_tracker = &mut self.child_keys;
236 let main_key_tracker = &mut self.main_keys;
237
238 let key_tracker = if let Some(childtrie) = childtrie {
239 child_key_tracker.entry(childtrie.to_vec()).or_insert_with(LinkedHashMap::new)
240 } else {
241 main_key_tracker
242 };
243
244 let should_log = match key_tracker.get_mut(key) {
245 None => {
246 let mut has_been_read = TrackedStorageKey::new(key.to_vec());
247 has_been_read.add_read();
248 key_tracker.insert(key.to_vec(), has_been_read);
249 true
250 },
251 Some(tracker) => {
252 let should_log = !tracker.has_been_read();
253 tracker.add_read();
254 should_log
255 },
256 };
257
258 if should_log {
259 if let Some(childtrie) = childtrie {
260 log::trace!(
261 target: "benchmark",
262 "Childtrie Read: {} {}", HexDisplay::from(&childtrie), HexDisplay::from(&key)
263 );
264 } else {
265 log::trace!(target: "benchmark", "Read: {}", HexDisplay::from(&key));
266 }
267 }
268 }
269
270 fn add_write_key(&mut self, childtrie: Option<&[u8]>, key: &[u8]) {
272 if !self.enable_tracking {
273 return;
274 }
275
276 let child_key_tracker = &mut self.child_keys;
277 let main_key_tracker = &mut self.main_keys;
278
279 let key_tracker = if let Some(childtrie) = childtrie {
280 child_key_tracker.entry(childtrie.to_vec()).or_insert_with(LinkedHashMap::new)
281 } else {
282 main_key_tracker
283 };
284
285 let should_log = match key_tracker.get_mut(key) {
287 None => {
288 let mut has_been_written = TrackedStorageKey::new(key.to_vec());
289 has_been_written.add_write();
290 key_tracker.insert(key.to_vec(), has_been_written);
291 true
292 },
293 Some(tracker) => {
294 let should_log = !tracker.has_been_written();
295 tracker.add_write();
296 should_log
297 },
298 };
299
300 if should_log {
301 if let Some(childtrie) = childtrie {
302 log::trace!(
303 target: "benchmark",
304 "Childtrie Write: {} {}", HexDisplay::from(&childtrie), HexDisplay::from(&key)
305 );
306 } else {
307 log::trace!(target: "benchmark", "Write: {}", HexDisplay::from(&key));
308 }
309 }
310 }
311
312 fn all_trackers(&self) -> Vec<TrackedStorageKey> {
314 let mut all_trackers = Vec::new();
315
316 self.main_keys.iter().for_each(|(_, tracker)| {
317 all_trackers.push(tracker.clone());
318 });
319
320 self.child_keys.iter().for_each(|(_, child_tracker)| {
321 child_tracker.iter().for_each(|(_, tracker)| {
322 all_trackers.push(tracker.clone());
323 });
324 });
325
326 all_trackers
327 }
328}
329
/// Error message returned whenever the backing state has not been opened.
fn state_err() -> String {
    String::from("State is not open")
}
333
334impl<Hasher: Hash> StateBackend<Hasher> for BenchmarkingState<Hasher> {
335 type Error = <DbState<Hasher> as StateBackend<Hasher>>::Error;
336 type TrieBackendStorage = <DbState<Hasher> as StateBackend<Hasher>>::TrieBackendStorage;
337 type RawIter = RawIter<Hasher>;
338
339 fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
340 self.add_read_key(None, key);
341 self.state.borrow().as_ref().ok_or_else(state_err)?.storage(key)
342 }
343
344 fn storage_hash(&self, key: &[u8]) -> Result<Option<Hasher::Output>, Self::Error> {
345 self.add_read_key(None, key);
346 self.state.borrow().as_ref().ok_or_else(state_err)?.storage_hash(key)
347 }
348
349 fn child_storage(
350 &self,
351 child_info: &ChildInfo,
352 key: &[u8],
353 ) -> Result<Option<Vec<u8>>, Self::Error> {
354 self.add_read_key(Some(child_info.storage_key()), key);
355 self.state
356 .borrow()
357 .as_ref()
358 .ok_or_else(state_err)?
359 .child_storage(child_info, key)
360 }
361
362 fn child_storage_hash(
363 &self,
364 child_info: &ChildInfo,
365 key: &[u8],
366 ) -> Result<Option<Hasher::Output>, Self::Error> {
367 self.add_read_key(Some(child_info.storage_key()), key);
368 self.state
369 .borrow()
370 .as_ref()
371 .ok_or_else(state_err)?
372 .child_storage_hash(child_info, key)
373 }
374
375 fn closest_merkle_value(
376 &self,
377 key: &[u8],
378 ) -> Result<Option<MerkleValue<Hasher::Output>>, Self::Error> {
379 self.add_read_key(None, key);
380 self.state.borrow().as_ref().ok_or_else(state_err)?.closest_merkle_value(key)
381 }
382
383 fn child_closest_merkle_value(
384 &self,
385 child_info: &ChildInfo,
386 key: &[u8],
387 ) -> Result<Option<MerkleValue<Hasher::Output>>, Self::Error> {
388 self.add_read_key(None, key);
389 self.state
390 .borrow()
391 .as_ref()
392 .ok_or_else(state_err)?
393 .child_closest_merkle_value(child_info, key)
394 }
395
396 fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
397 self.add_read_key(None, key);
398 self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key)
399 }
400
401 fn exists_child_storage(
402 &self,
403 child_info: &ChildInfo,
404 key: &[u8],
405 ) -> Result<bool, Self::Error> {
406 self.add_read_key(Some(child_info.storage_key()), key);
407 self.state
408 .borrow()
409 .as_ref()
410 .ok_or_else(state_err)?
411 .exists_child_storage(child_info, key)
412 }
413
414 fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
415 self.add_read_key(None, key);
416 self.state.borrow().as_ref().ok_or_else(state_err)?.next_storage_key(key)
417 }
418
419 fn next_child_storage_key(
420 &self,
421 child_info: &ChildInfo,
422 key: &[u8],
423 ) -> Result<Option<Vec<u8>>, Self::Error> {
424 self.add_read_key(Some(child_info.storage_key()), key);
425 self.state
426 .borrow()
427 .as_ref()
428 .ok_or_else(state_err)?
429 .next_child_storage_key(child_info, key)
430 }
431
432 fn storage_root<'a>(
433 &self,
434 delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
435 state_version: StateVersion,
436 ) -> (Hasher::Output, BackendTransaction<Hasher>) {
437 self.state
438 .borrow()
439 .as_ref()
440 .map_or(Default::default(), |s| s.storage_root(delta, state_version))
441 }
442
443 fn child_storage_root<'a>(
444 &self,
445 child_info: &ChildInfo,
446 delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
447 state_version: StateVersion,
448 ) -> (Hasher::Output, bool, BackendTransaction<Hasher>) {
449 self.state
450 .borrow()
451 .as_ref()
452 .map_or(Default::default(), |s| s.child_storage_root(child_info, delta, state_version))
453 }
454
455 fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
456 let child_trie =
457 args.child_info.as_ref().map(|child_info| child_info.storage_key().to_vec());
458 self.state
459 .borrow()
460 .as_ref()
461 .map(|s| s.raw_iter(args))
462 .unwrap_or(Ok(Default::default()))
463 .map(|raw_iter| RawIter {
464 inner: raw_iter,
465 key_tracker: self.key_tracker.clone(),
466 child_trie,
467 })
468 }
469
470 fn commit(
471 &self,
472 storage_root: <Hasher as DbHasher>::Out,
473 mut transaction: BackendTransaction<Hasher>,
474 main_storage_changes: StorageCollection,
475 child_storage_changes: ChildStorageCollection,
476 ) -> Result<(), Self::Error> {
477 if let Some(db) = self.db.take() {
478 let mut db_transaction = DBTransaction::new();
479 let changes = transaction.drain();
480 let mut keys = Vec::with_capacity(changes.len());
481 for (key, (val, rc)) in changes {
482 if rc > 0 {
483 db_transaction.put(0, &key, &val);
484 } else if rc < 0 {
485 db_transaction.delete(0, &key);
486 }
487 keys.push(key);
488 }
489 let mut record = self.record.take();
490 record.extend(keys);
491 self.record.set(record);
492 db.write(db_transaction)
493 .map_err(|_| String::from("Error committing transaction"))?;
494 self.root.set(storage_root);
495 self.db.set(Some(db));
496
497 main_storage_changes.iter().for_each(|(key, _)| {
499 self.add_write_key(None, key);
500 });
501 child_storage_changes.iter().for_each(|(child_storage_key, storage_changes)| {
502 storage_changes.iter().for_each(|(key, _)| {
503 self.add_write_key(Some(child_storage_key), key);
504 })
505 });
506 } else {
507 return Err("Trying to commit to a closed db".into());
508 }
509 self.reopen()
510 }
511
512 fn wipe(&self) -> Result<(), Self::Error> {
513 let record = self.record.take();
515 if let Some(db) = self.db.take() {
516 let mut db_transaction = DBTransaction::new();
517 for key in record {
518 match self.genesis.get(&key) {
519 Some((v, _)) => db_transaction.put(0, &key, v),
520 None => db_transaction.delete(0, &key),
521 }
522 }
523 db.write(db_transaction)
524 .map_err(|_| String::from("Error committing transaction"))?;
525 self.db.set(Some(db));
526 }
527
528 self.root.set(self.genesis_root);
529 self.reopen()?;
530 self.wipe_tracker();
531 Ok(())
532 }
533
534 fn read_write_count(&self) -> (u32, u32, u32, u32) {
540 let mut reads = 0;
541 let mut repeat_reads = 0;
542 let mut writes = 0;
543 let mut repeat_writes = 0;
544
545 self.all_trackers().iter().for_each(|tracker| {
546 if !tracker.whitelisted {
547 if tracker.reads > 0 {
548 reads += 1;
549 repeat_reads += tracker.reads - 1;
550 }
551
552 if tracker.writes > 0 {
553 writes += 1;
554 repeat_writes += tracker.writes - 1;
555 }
556 }
557 });
558 (reads, repeat_reads, writes, repeat_writes)
559 }
560
561 fn reset_read_write_count(&self) {
563 self.wipe_tracker()
564 }
565
566 fn get_whitelist(&self) -> Vec<TrackedStorageKey> {
567 self.whitelist.borrow().to_vec()
568 }
569
570 fn set_whitelist(&self, new: Vec<TrackedStorageKey>) {
571 *self.whitelist.borrow_mut() = new;
572 }
573
574 fn get_read_and_written_keys(&self) -> Vec<(Vec<u8>, u32, u32, bool)> {
575 let mut prefix_key_tracker = LinkedHashMap::<Vec<u8>, (u32, u32, bool)>::new();
579 self.all_trackers().iter().for_each(|tracker| {
580 if !tracker.whitelisted {
581 let prefix_length = tracker.key.len().min(32);
582 let prefix = tracker.key[0..prefix_length].to_vec();
583 let reads = tracker.reads.min(1);
586 let writes = tracker.writes.min(1);
587 if let Some(prefix_tracker) = prefix_key_tracker.get_mut(&prefix) {
588 prefix_tracker.0 += reads;
589 prefix_tracker.1 += writes;
590 } else {
591 prefix_key_tracker.insert(prefix, (reads, writes, tracker.whitelisted));
592 }
593 }
594 });
595
596 prefix_key_tracker
597 .iter()
598 .map(|(key, tracker)| -> (Vec<u8>, u32, u32, bool) {
599 (key.to_vec(), tracker.0, tracker.1, tracker.2)
600 })
601 .collect::<Vec<_>>()
602 }
603
604 fn register_overlay_stats(&self, stats: &subsoil::state_machine::StateMachineStats) {
605 self.state.borrow().as_ref().map(|s| s.register_overlay_stats(stats));
606 }
607
608 fn usage_info(&self) -> subsoil::state_machine::UsageInfo {
609 self.state
610 .borrow()
611 .as_ref()
612 .map_or(subsoil::state_machine::UsageInfo::empty(), |s| s.usage_info())
613 }
614
615 fn proof_size(&self) -> Option<u32> {
616 self.proof_recorder.as_ref().map(|recorder| {
617 let proof_size = recorder.estimate_encoded_size() as u32;
618
619 let proof = recorder.to_storage_proof();
620
621 let proof_recorder_root = self.proof_recorder_root.get();
622 if proof_recorder_root == Default::default() || proof_size == 1 {
623 log::debug!(target: "benchmark", "Some proof size: {}", &proof_size);
625 proof_size
626 } else {
627 if let Some(size) = proof.encoded_compact_size::<Hasher>(proof_recorder_root) {
628 size as u32
629 } else if proof_recorder_root == self.root.get() {
630 log::debug!(target: "benchmark", "No changes - no proof");
631 0
632 } else {
633 panic!(
634 "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}",
635 self.proof_recorder_root.get(),
636 self.root.get(),
637 self.genesis_root,
638 proof_size,
639 );
640 }
641 }
642 })
643 }
644}
645
646impl<Hasher: Hash> std::fmt::Debug for BenchmarkingState<Hasher> {
647 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
648 write!(f, "Bench DB")
649 }
650}
651
#[cfg(test)]
mod test {
    use super::BenchmarkingState;
    use subsoil::runtime::traits::HashingFor;
    use subsoil::state_machine::backend::Backend as _;

    /// Decode a hex string into bytes; panics on malformed input (test-only).
    fn hex(hex: &str) -> Vec<u8> {
        array_bytes::hex2bytes(hex).unwrap()
    }

    #[test]
    fn iteration_is_also_counted_in_rw_counts() {
        // Genesis storage containing exactly one top-level key.
        let storage = subsoil::runtime::Storage {
            top: vec![(
                hex("ce6e1397e668c7fcf47744350dc59688455a2c2dbd2e2a649df4e55d93cd7158"),
                hex("0102030405060708"),
            )]
            .into_iter()
            .collect(),
            ..subsoil::runtime::Storage::default()
        };
        let bench_state = BenchmarkingState::<HashingFor<crate::db::tests::Block>>::new(
            storage, None, false, true,
        )
        .unwrap();

        // Nothing tracked yet; iterating the single key must register one
        // (non-repeat) read.
        assert_eq!(bench_state.read_write_count(), (0, 0, 0, 0));
        assert_eq!(bench_state.keys(Default::default()).unwrap().count(), 1);
        assert_eq!(bench_state.read_write_count(), (1, 0, 0, 0));
    }

    #[test]
    fn read_to_main_and_child_tries() {
        let bench_state = BenchmarkingState::<HashingFor<crate::db::tests::Block>>::new(
            Default::default(),
            None,
            false,
            true,
        )
        .unwrap();

        // Run twice to verify `wipe` fully resets the tracker between rounds.
        for _ in 0..2 {
            let child1 = subsoil::core::storage::ChildInfo::new_default(b"child1");
            let child2 = subsoil::core::storage::ChildInfo::new_default(b"child2");

            bench_state.storage(b"foo").unwrap();
            bench_state.child_storage(&child1, b"foo").unwrap();
            bench_state.child_storage(&child2, b"foo").unwrap();

            bench_state.storage(b"bar").unwrap();
            bench_state.child_storage(&child1, b"bar").unwrap();
            bench_state.child_storage(&child2, b"bar").unwrap();

            bench_state
                .commit(
                    Default::default(),
                    Default::default(),
                    vec![("foo".as_bytes().to_vec(), None)],
                    vec![("child1".as_bytes().to_vec(), vec![("foo".as_bytes().to_vec(), None)])],
                )
                .unwrap();

            // 6 distinct reads (3 keys x 2 tries each), 2 distinct writes.
            let rw_tracker = bench_state.read_write_count();
            assert_eq!(rw_tracker.0, 6);
            assert_eq!(rw_tracker.1, 0);
            assert_eq!(rw_tracker.2, 2);
            assert_eq!(rw_tracker.3, 0);
            bench_state.wipe().unwrap();
        }
    }
}
722}