namada_storage/
mockdb.rs

1//! DB mock for testing
2
3#![allow(clippy::cast_possible_wrap, clippy::arithmetic_side_effects)]
4
5use std::cell::RefCell;
6use std::collections::{BTreeMap, btree_map};
7use std::path::Path;
8
9use itertools::Either;
10use namada_core::borsh::{BorshDeserialize, BorshSerialize};
11use namada_core::chain::{BlockHeader, BlockHeight, Epoch};
12use namada_core::hash::Hash;
13use namada_core::storage::{DbColFam, KEY_SEGMENT_SEPARATOR, Key, KeySeg};
14use namada_core::{decode, encode, ethereum_events};
15use namada_gas::Gas;
16use namada_merkle_tree::{
17    MerkleTreeStoresRead, MerkleTreeStoresWrite, StoreType,
18    tree_key_prefix_with_epoch, tree_key_prefix_with_height,
19};
20use namada_replay_protection as replay_protection;
21use regex::Regex;
22
23use crate::DBUpdateVisitor;
24use crate::db::{
25    BlockStateRead, BlockStateWrite, DB, DBIter, DBWriteBatch, Error, Result,
26};
27use crate::types::{KVBytes, PatternIterator, PrefixIterator};
28
// Pseudo column family under which all subspace (application state) keys live.
const SUBSPACE_CF: &str = "subspace";

// Keys for the globally-scoped (non-height-prefixed) last block state.
const BLOCK_HEIGHT_KEY: &str = "height";
const NEXT_EPOCH_MIN_START_HEIGHT_KEY: &str = "next_epoch_min_start_height";
const NEXT_EPOCH_MIN_START_TIME_KEY: &str = "next_epoch_min_start_time";
const UPDATE_EPOCH_BLOCKS_DELAY_KEY: &str = "update_epoch_blocks_delay";
const COMMIT_ONLY_DATA_KEY: &str = "commit_only_data_commitment";
const CONVERSION_STATE_KEY: &str = "conversion_state";
const ETHEREUM_HEIGHT_KEY: &str = "ethereum_height";
const ETH_EVENTS_QUEUE_KEY: &str = "eth_events_queue";
const RESULTS_KEY_PREFIX: &str = "results";

// Key segments appended under a height/epoch prefix for per-block data.
const MERKLE_TREE_ROOT_KEY_SEGMENT: &str = "root";
const MERKLE_TREE_STORE_KEY_SEGMENT: &str = "store";
const BLOCK_HEADER_KEY_SEGMENT: &str = "header";
const BLOCK_TIME_KEY_SEGMENT: &str = "time";
const EPOCH_KEY_SEGMENT: &str = "epoch";
const PRED_EPOCHS_KEY_SEGMENT: &str = "pred_epochs";
const ADDRESS_GEN_KEY_SEGMENT: &str = "address_gen";

// Segments distinguishing the old (pre-write) and new (post-write) value
// diffs recorded under a height prefix.
const OLD_DIFF_PREFIX: &str = "old";
const NEW_DIFF_PREFIX: &str = "new";
51
/// An in-memory DB for testing.
///
/// All data is kept in a single flat map from stringified keys to raw
/// (usually Borsh-encoded) value bytes.
#[derive(Debug, Default)]
pub struct MockDB(
    // The state is wrapped in `RefCell` to allow modifying it directly from
    // batch write method (which requires immutable self ref).
    RefCell<BTreeMap<String, Vec<u8>>>,
);
59
// SAFETY: The `MockDB` is not `Sync` (it contains a `RefCell`), but we're
// sharing it across threads for reading only (for parallelized VP runs). In a
// different context, this may not be safe.
unsafe impl Sync for MockDB {}
64
/// An in-memory write batch is not needed as it just updates values in memory.
/// It's here to satisfy the storage interface.
#[derive(Debug, Default)]
pub struct MockDBWriteBatch;
69
70impl MockDB {
71    fn read_value<T>(&self, key: impl AsRef<str>) -> Result<Option<T>>
72    where
73        T: BorshDeserialize,
74    {
75        self.0
76            .borrow()
77            .get(key.as_ref())
78            .map(|bytes| decode(bytes).map_err(Error::CodingError))
79            .transpose()
80    }
81
82    fn write_value<T>(&self, key: impl AsRef<str>, value: &T)
83    where
84        T: BorshSerialize,
85    {
86        self.0
87            .borrow_mut()
88            .insert(key.as_ref().to_string(), encode(value));
89    }
90}
91
/// Source to restore a [`MockDB`] from.
///
/// Since this enum has no variants, you can't
/// actually restore a [`MockDB`] instance.
pub enum MockDBRestoreSource {}
97
98impl DB for MockDB {
99    /// There is no cache for MockDB
100    type Cache = ();
101    type Migrator = ();
102    type RestoreSource<'a> = MockDBRestoreSource;
103    type WriteBatch = MockDBWriteBatch;
104
    fn open(_db_path: impl AsRef<Path>, _cache: Option<&Self::Cache>) -> Self {
        // The path and cache are ignored - a mock DB always starts empty
        Self::default()
    }
108
    fn open_read_only(
        _db_path: impl AsRef<Path>,
        _cache: Option<&Self::Cache>,
    ) -> Self {
        // Same as `open` - there is no on-disk state to open read-only
        Self::default()
    }
115
    fn restore_from(&mut self, source: MockDBRestoreSource) -> Result<()> {
        // `MockDBRestoreSource` is uninhabited, so this is statically
        // unreachable - the empty match proves it to the compiler
        match source {}
    }
119
    fn flush(&self, _wait: bool) -> Result<()> {
        // No-op: all state lives in memory, there is nothing to flush
        Ok(())
    }
123
124    fn read_last_block(&self) -> Result<Option<BlockStateRead>> {
125        // Block height
126        let height: BlockHeight = match self.read_value(BLOCK_HEIGHT_KEY)? {
127            Some(h) => h,
128            None => return Ok(None),
129        };
130
131        // Epoch start height and time
132        let next_epoch_min_start_height =
133            match self.read_value(NEXT_EPOCH_MIN_START_HEIGHT_KEY)? {
134                Some(h) => h,
135                None => return Ok(None),
136            };
137        let next_epoch_min_start_time =
138            match self.read_value(NEXT_EPOCH_MIN_START_TIME_KEY)? {
139                Some(t) => t,
140                None => return Ok(None),
141            };
142        let update_epoch_blocks_delay =
143            match self.read_value(UPDATE_EPOCH_BLOCKS_DELAY_KEY)? {
144                Some(d) => d,
145                None => return Ok(None),
146            };
147        let commit_only_data = match self.read_value(COMMIT_ONLY_DATA_KEY)? {
148            Some(d) => d,
149            None => return Ok(None),
150        };
151        let conversion_state = match self.read_value(CONVERSION_STATE_KEY)? {
152            Some(c) => c,
153            None => return Ok(None),
154        };
155
156        let ethereum_height = match self.read_value(ETHEREUM_HEIGHT_KEY)? {
157            Some(h) => h,
158            None => return Ok(None),
159        };
160
161        let eth_events_queue = match self.read_value(ETH_EVENTS_QUEUE_KEY)? {
162            Some(q) => q,
163            None => return Ok(None),
164        };
165
166        // Block results
167        let results_key = format!("{RESULTS_KEY_PREFIX}/{}", height.raw());
168        let results = match self.read_value(results_key)? {
169            Some(r) => r,
170            None => return Ok(None),
171        };
172
173        let prefix = height.raw();
174
175        let time_key = format!("{prefix}/{BLOCK_TIME_KEY_SEGMENT}");
176        let time = match self.read_value(time_key)? {
177            Some(t) => t,
178            None => return Ok(None),
179        };
180
181        let epoch_key = format!("{prefix}/{EPOCH_KEY_SEGMENT}");
182        let epoch = match self.read_value(epoch_key)? {
183            Some(e) => e,
184            None => return Ok(None),
185        };
186
187        let pred_epochs_key = format!("{prefix}/{PRED_EPOCHS_KEY_SEGMENT}");
188        let pred_epochs = match self.read_value(pred_epochs_key)? {
189            Some(e) => e,
190            None => return Ok(None),
191        };
192
193        let address_gen_key = format!("{prefix}/{ADDRESS_GEN_KEY_SEGMENT}");
194        let address_gen = match self.read_value(address_gen_key)? {
195            Some(a) => a,
196            None => return Ok(None),
197        };
198
199        Ok(Some(BlockStateRead {
200            height,
201            time,
202            epoch,
203            pred_epochs,
204            results,
205            conversion_state,
206            next_epoch_min_start_height,
207            next_epoch_min_start_time,
208            update_epoch_blocks_delay,
209            address_gen,
210            ethereum_height,
211            eth_events_queue,
212            commit_only_data,
213        }))
214    }
215
216    fn add_block_to_batch(
217        &self,
218        state: BlockStateWrite<'_>,
219        _batch: &mut Self::WriteBatch,
220        is_full_commit: bool,
221    ) -> Result<()> {
222        let BlockStateWrite {
223            merkle_tree_stores,
224            header,
225            time,
226            height,
227            epoch,
228            pred_epochs,
229            next_epoch_min_start_height,
230            next_epoch_min_start_time,
231            update_epoch_blocks_delay,
232            address_gen,
233            results,
234            conversion_state,
235            ethereum_height,
236            eth_events_queue,
237            commit_only_data,
238        }: BlockStateWrite<'_> = state;
239
240        self.write_value(
241            NEXT_EPOCH_MIN_START_HEIGHT_KEY,
242            &next_epoch_min_start_height,
243        );
244        self.write_value(
245            NEXT_EPOCH_MIN_START_TIME_KEY,
246            &next_epoch_min_start_time,
247        );
248        self.write_value(
249            UPDATE_EPOCH_BLOCKS_DELAY_KEY,
250            &update_epoch_blocks_delay,
251        );
252        self.write_value(ETHEREUM_HEIGHT_KEY, &ethereum_height);
253        self.write_value(ETH_EVENTS_QUEUE_KEY, &eth_events_queue);
254        self.write_value(CONVERSION_STATE_KEY, &conversion_state);
255        self.write_value(COMMIT_ONLY_DATA_KEY, &commit_only_data);
256
257        let prefix = height.raw();
258
259        // Merkle tree
260        for st in StoreType::iter() {
261            if st.is_stored_every_block() || is_full_commit {
262                let key_prefix = if st.is_stored_every_block() {
263                    tree_key_prefix_with_height(st, height)
264                } else {
265                    tree_key_prefix_with_epoch(st, epoch)
266                };
267                let root_key =
268                    format!("{key_prefix}/{MERKLE_TREE_ROOT_KEY_SEGMENT}");
269                self.write_value(root_key, merkle_tree_stores.root(st));
270                let store_key =
271                    format!("{key_prefix}/{MERKLE_TREE_STORE_KEY_SEGMENT}");
272                self.0
273                    .borrow_mut()
274                    .insert(store_key, merkle_tree_stores.store(st).encode());
275            }
276        }
277        // Block header
278        if let Some(h) = header {
279            let header_key = format!("{prefix}/{BLOCK_HEADER_KEY_SEGMENT}");
280            self.write_value(header_key, &h);
281        }
282        // Block time
283        let time_key = format!("{prefix}/{BLOCK_TIME_KEY_SEGMENT}");
284        self.write_value(time_key, &time);
285        // Block epoch
286        let epoch_key = format!("{prefix}/{EPOCH_KEY_SEGMENT}");
287        self.write_value(epoch_key, &epoch);
288        // Block results
289        let results_key = format!("{RESULTS_KEY_PREFIX}/{}", height.raw());
290        self.write_value(results_key, &results);
291        // Predecessor block epochs
292        let pred_epochs_key = format!("{prefix}/{PRED_EPOCHS_KEY_SEGMENT}");
293        self.write_value(pred_epochs_key, &pred_epochs);
294        // Address gen
295        let address_gen_key = format!("{prefix}/{ADDRESS_GEN_KEY_SEGMENT}");
296        self.write_value(address_gen_key, &address_gen);
297
298        // Block height
299        self.write_value(BLOCK_HEIGHT_KEY, &height);
300
301        Ok(())
302    }
303
304    fn read_block_header(
305        &self,
306        height: BlockHeight,
307    ) -> Result<Option<BlockHeader>> {
308        let header_key = format!("{}/{BLOCK_HEADER_KEY_SEGMENT}", height.raw());
309        self.read_value(header_key)
310    }
311
312    fn read_merkle_tree_stores(
313        &self,
314        epoch: Epoch,
315        base_height: BlockHeight,
316        store_type: Option<StoreType>,
317    ) -> Result<Option<MerkleTreeStoresRead>> {
318        let mut merkle_tree_stores = MerkleTreeStoresRead::default();
319        let store_types = store_type
320            .as_ref()
321            .map(|st| Either::Left(std::iter::once(st)))
322            .unwrap_or_else(|| Either::Right(StoreType::iter()));
323        for st in store_types {
324            let key_prefix = if st.is_stored_every_block() {
325                tree_key_prefix_with_height(st, base_height)
326            } else {
327                tree_key_prefix_with_epoch(st, epoch)
328            };
329            let root_key =
330                format!("{key_prefix}/{MERKLE_TREE_ROOT_KEY_SEGMENT}");
331            match self.read_value(root_key)? {
332                Some(root) => merkle_tree_stores.set_root(st, root),
333                None if store_type.is_some() => return Ok(None),
334                _ => continue,
335            }
336            let store_key =
337                format!("{key_prefix}/{MERKLE_TREE_STORE_KEY_SEGMENT}");
338            let bytes = self.0.borrow().get(&store_key.to_string()).cloned();
339            match bytes {
340                Some(b) => merkle_tree_stores.set_store(st.decode_store(b)?),
341                None if store_type.is_some() => return Ok(None),
342                _ => continue,
343            }
344        }
345        Ok(Some(merkle_tree_stores))
346    }
347
348    fn has_replay_protection_entry(&self, hash: &Hash) -> Result<bool> {
349        let prefix_key =
350            Key::parse("replay_protection").map_err(Error::KeyError)?;
351        let key = prefix_key.join(&replay_protection::key(hash));
352        let current_key =
353            prefix_key.join(&replay_protection::current_key(hash));
354        if self.0.borrow().contains_key(&key.to_string())
355            || self.0.borrow().contains_key(&current_key.to_string())
356        {
357            return Ok(true);
358        }
359
360        Ok(false)
361    }
362
363    fn read_diffs_val(
364        &self,
365        key: &Key,
366        height: BlockHeight,
367        is_old: bool,
368    ) -> Result<Option<Vec<u8>>> {
369        let old_new_seg = if is_old {
370            OLD_DIFF_PREFIX
371        } else {
372            NEW_DIFF_PREFIX
373        };
374
375        let prefix = Key::from(height.to_db_key())
376            .push(&old_new_seg.to_string().to_db_key())
377            .map_err(Error::KeyError)?
378            .join(key);
379
380        Ok(self.0.borrow().get(&prefix.to_string()).cloned())
381    }
382
383    fn read_subspace_val(&self, key: &Key) -> Result<Option<Vec<u8>>> {
384        let key = Key::parse(SUBSPACE_CF).map_err(Error::KeyError)?.join(key);
385        Ok(self.0.borrow().get(&key.to_string()).cloned())
386    }
387
388    fn read_subspace_val_with_height(
389        &self,
390        key: &Key,
391        height: BlockHeight,
392        last_height: BlockHeight,
393    ) -> Result<Option<Vec<u8>>> {
394        if height == last_height {
395            self.read_subspace_val(key)
396        } else {
397            // Quick-n-dirty implementation for reading subspace value at
398            // height:
399            // - See if there are any diffs between height+1..last_height.
400            // - If so, the first one will provide the value we want as its old
401            //   value.
402            // - If not, we can just read the value at the latest height.
403            for h in (height.0 + 1)..=last_height.0 {
404                let old_diff = self.read_diffs_val(key, h.into(), true)?;
405                let new_diff = self.read_diffs_val(key, h.into(), false)?;
406
407                match (old_diff, new_diff) {
408                    (Some(old_diff), Some(_)) | (Some(old_diff), None) => {
409                        // If there is an old diff, it contains the value at the
410                        // requested height.
411                        return Ok(Some(old_diff));
412                    }
413                    (None, Some(_)) => {
414                        // If there is a new diff but no old diff, there was
415                        // no value at the requested height.
416                        return Ok(None);
417                    }
418                    (None, None) => {
419                        // If there are no diffs, keep looking.
420                        continue;
421                    }
422                }
423            }
424
425            self.read_subspace_val(key)
426        }
427    }
428
    fn write_subspace_val(
        &mut self,
        height: BlockHeight,
        key: &Key,
        value: impl AsRef<[u8]>,
        persist_diffs: bool,
    ) -> Result<i64> {
        // batch_write are directly committed, so delegating to the batched
        // variant with a throwaway batch is equivalent to a direct write
        self.batch_write_subspace_val(
            &mut MockDBWriteBatch,
            height,
            key,
            value,
            persist_diffs,
        )
    }
445
    fn delete_subspace_val(
        &mut self,
        height: BlockHeight,
        key: &Key,
        persist_diffs: bool,
    ) -> Result<i64> {
        // batch_delete are directly committed, so delegating to the batched
        // variant with a throwaway batch is equivalent to a direct delete
        self.batch_delete_subspace_val(
            &mut MockDBWriteBatch,
            height,
            key,
            persist_diffs,
        )
    }
460
    fn batch() -> Self::WriteBatch {
        // The write batch is a stateless unit struct - see `exec_batch`
        MockDBWriteBatch
    }
464
    fn exec_batch(&self, _batch: Self::WriteBatch) -> Result<()> {
        // Nothing to do - in MockDB, batch writes are committed directly from
        // `batch_write_subspace_val` and `batch_delete_subspace_val`.
        Ok(())
    }
470
471    fn batch_write_subspace_val(
472        &self,
473        _batch: &mut Self::WriteBatch,
474        height: BlockHeight,
475        key: &Key,
476        value: impl AsRef<[u8]>,
477        persist_diffs: bool,
478    ) -> Result<i64> {
479        let value = value.as_ref();
480        let subspace_key =
481            Key::parse(SUBSPACE_CF).map_err(Error::KeyError)?.join(key);
482        let current_len = value.len() as i64;
483        let diff_prefix = Key::from(height.to_db_key());
484        let mut db = self.0.borrow_mut();
485
486        // Diffs - Note that this is different from RocksDB that has a separate
487        // CF for non-persisted diffs (ROLLBACK_CF)
488        let size_diff =
489            match db.insert(subspace_key.to_string(), value.to_owned()) {
490                Some(prev_value) => {
491                    let old_key = diff_prefix
492                        .push(&OLD_DIFF_PREFIX.to_string().to_db_key())
493                        .unwrap()
494                        .join(key);
495                    db.insert(old_key.to_string(), prev_value.clone());
496                    let new_key = diff_prefix
497                        .push(&NEW_DIFF_PREFIX.to_string().to_db_key())
498                        .unwrap()
499                        .join(key);
500                    db.insert(new_key.to_string(), value.to_owned());
501                    current_len - prev_value.len() as i64
502                }
503                None => {
504                    let new_key = diff_prefix
505                        .push(&NEW_DIFF_PREFIX.to_string().to_db_key())
506                        .unwrap()
507                        .join(key);
508                    db.insert(new_key.to_string(), value.to_owned());
509                    current_len
510                }
511            };
512
513        if !persist_diffs {
514            if let Some(pruned_height) = height.0.checked_sub(1) {
515                let pruned_key_prefix = Key::from(pruned_height.to_db_key());
516                let old_val_key = pruned_key_prefix
517                    .push(&NEW_DIFF_PREFIX.to_string().to_db_key())
518                    .unwrap()
519                    .join(key)
520                    .to_string();
521                db.remove(&old_val_key);
522                let new_val_key = pruned_key_prefix
523                    .push(&NEW_DIFF_PREFIX.to_string().to_db_key())
524                    .unwrap()
525                    .join(key)
526                    .to_string();
527                db.remove(&new_val_key);
528            }
529        }
530
531        Ok(size_diff)
532    }
533
534    fn batch_delete_subspace_val(
535        &self,
536        _batch: &mut Self::WriteBatch,
537        height: BlockHeight,
538        key: &Key,
539        persist_diffs: bool,
540    ) -> Result<i64> {
541        let subspace_key =
542            Key::parse(SUBSPACE_CF).map_err(Error::KeyError)?.join(key);
543        let diff_prefix = Key::from(height.to_db_key());
544        let mut db = self.0.borrow_mut();
545
546        // Diffs - Note that this is different from RocksDB that has a separate
547        // CF for non-persisted diffs (ROLLBACK_CF)
548        let size_diff = match db.remove(&subspace_key.to_string()) {
549            Some(value) => {
550                let old_key = diff_prefix
551                    .push(&OLD_DIFF_PREFIX.to_string().to_db_key())
552                    .unwrap()
553                    .join(key);
554                db.insert(old_key.to_string(), value.clone());
555
556                if !persist_diffs {
557                    if let Some(pruned_height) = height.0.checked_sub(1) {
558                        let pruned_key_prefix =
559                            Key::from(pruned_height.to_db_key());
560                        let old_val_key = pruned_key_prefix
561                            .push(&NEW_DIFF_PREFIX.to_string().to_db_key())
562                            .unwrap()
563                            .join(key)
564                            .to_string();
565                        db.remove(&old_val_key);
566                        let new_val_key = pruned_key_prefix
567                            .push(&NEW_DIFF_PREFIX.to_string().to_db_key())
568                            .unwrap()
569                            .join(key)
570                            .to_string();
571                        db.remove(&new_val_key);
572                    }
573                }
574                value.len() as i64
575            }
576            None => 0,
577        };
578
579        Ok(size_diff)
580    }
581
582    fn prune_merkle_tree_store(
583        &mut self,
584        _batch: &mut Self::WriteBatch,
585        store_type: &StoreType,
586        pruned_target: Either<BlockHeight, Epoch>,
587    ) -> Result<()> {
588        let key_prefix = match pruned_target {
589            Either::Left(height) => {
590                tree_key_prefix_with_height(store_type, height)
591            }
592            Either::Right(epoch) => {
593                tree_key_prefix_with_epoch(store_type, epoch)
594            }
595        };
596        let root_key = format!("{key_prefix}/{MERKLE_TREE_ROOT_KEY_SEGMENT}");
597        self.0.borrow_mut().remove(&root_key);
598        let store_key = format!("{key_prefix}/{MERKLE_TREE_STORE_KEY_SEGMENT}");
599        self.0.borrow_mut().remove(&store_key);
600        Ok(())
601    }
602
    fn read_bridge_pool_signed_nonce(
        &self,
        _height: BlockHeight,
        _last_height: BlockHeight,
    ) -> Result<Option<ethereum_events::Uint>> {
        // The mock DB doesn't track a signed Bridge pool nonce - always empty
        Ok(None)
    }
610
611    fn write_replay_protection_entry(
612        &mut self,
613        _batch: &mut Self::WriteBatch,
614        key: &Key,
615    ) -> Result<()> {
616        let key = Key::parse("replay_protection")
617            .map_err(Error::KeyError)?
618            .join(key);
619
620        match self.0.borrow_mut().insert(key.to_string(), vec![]) {
621            Some(_) => Err(Error::DBError(format!(
622                "Replay protection key {key} already in storage"
623            ))),
624            None => Ok(()),
625        }
626    }
627
628    fn move_current_replay_protection_entries(
629        &mut self,
630        _batch: &mut Self::WriteBatch,
631    ) -> Result<()> {
632        let current_key_prefix = Key::parse("replay_protection")
633            .map_err(Error::KeyError)?
634            .push(&"current".to_string())
635            .map_err(Error::KeyError)?;
636        let mut target_hashes = vec![];
637
638        for (key, _) in self.0.borrow().iter() {
639            if key.starts_with(&current_key_prefix.to_string()) {
640                let hash = key
641                    .rsplit(KEY_SEGMENT_SEPARATOR)
642                    .next_back()
643                    .unwrap()
644                    .to_string();
645                target_hashes.push(hash);
646            }
647        }
648
649        for hash in target_hashes {
650            let current_key =
651                current_key_prefix.push(&hash).map_err(Error::KeyError)?;
652            let key = Key::parse("replay_protection")
653                .map_err(Error::KeyError)?
654                .push(&hash)
655                .map_err(Error::KeyError)?;
656
657            self.0.borrow_mut().remove(&current_key.to_string());
658            self.0.borrow_mut().insert(key.to_string(), vec![]);
659        }
660
661        Ok(())
662    }
663
    fn prune_non_persisted_diffs(
        &mut self,
        _batch: &mut Self::WriteBatch,
        _height: BlockHeight,
    ) -> Result<()> {
        // No-op - Note that this is different from RocksDB that has a separate
        // CF for non-persisted diffs (ROLLBACK_CF). Here, pruning happens
        // inline in `batch_write_subspace_val`/`batch_delete_subspace_val`.
        Ok(())
    }
673
    fn overwrite_entry(
        &self,
        _batch: &mut Self::WriteBatch,
        _cf: &DbColFam,
        _key: &Key,
        _new_value: impl AsRef<[u8]>,
        _persist_diffs: bool,
    ) -> Result<()> {
        // Not supported by the mock DB - callers must not rely on this
        unimplemented!()
    }
684
    fn migrator() -> Self::Migrator {
        unimplemented!("Migration isn't implemented in MockDB")
    }
688
689    fn update_last_block_merkle_tree(
690        &self,
691        merkle_tree_stores: MerkleTreeStoresWrite<'_>,
692        is_full_commit: bool,
693    ) -> Result<()> {
694        // Read the last block's height
695        let height: BlockHeight = self.read_value(BLOCK_HEIGHT_KEY)?.unwrap();
696
697        // Read the last block's epoch
698        let prefix = height.raw();
699        let epoch_key = format!("{prefix}/{EPOCH_KEY_SEGMENT}");
700        let epoch: Epoch = self.read_value(epoch_key)?.unwrap();
701
702        for st in StoreType::iter() {
703            if st.is_stored_every_block() || is_full_commit {
704                let key_prefix = if st.is_stored_every_block() {
705                    tree_key_prefix_with_height(st, height)
706                } else {
707                    tree_key_prefix_with_epoch(st, epoch)
708                };
709                let root_key =
710                    format!("{key_prefix}/{MERKLE_TREE_ROOT_KEY_SEGMENT}");
711                self.write_value(root_key, merkle_tree_stores.root(st));
712                let store_key =
713                    format!("{key_prefix}/{MERKLE_TREE_STORE_KEY_SEGMENT}");
714                self.0
715                    .borrow_mut()
716                    .insert(store_key, merkle_tree_stores.store(st).encode());
717            }
718        }
719        Ok(())
720    }
721}
722
723impl<'iter> DBIter<'iter> for MockDB {
724    type PatternIter = MockPatternIterator;
725    type PrefixIter = MockPrefixIterator;
726
727    fn iter_prefix(&'iter self, prefix: Option<&Key>) -> MockPrefixIterator {
728        let stripped_prefix = "subspace/".to_owned();
729        let prefix = format!(
730            "{}{}",
731            stripped_prefix,
732            match prefix {
733                Some(prefix) => {
734                    if prefix == &Key::default() {
735                        prefix.to_string()
736                    } else {
737                        format!("{prefix}/")
738                    }
739                }
740                None => "".to_string(),
741            }
742        );
743        let iter = self.0.borrow().clone().into_iter();
744        MockPrefixIterator::new(MockIterator { prefix, iter }, stripped_prefix)
745    }
746
    fn iter_pattern(
        &'iter self,
        prefix: Option<&Key>,
        pattern: Regex,
    ) -> Self::PatternIter {
        // Wrap the prefix iterator together with the regex; matching is done
        // lazily in `MockPatternIterator::next`
        MockPatternIterator {
            inner: PatternIterator {
                iter: self.iter_prefix(prefix),
                pattern,
            },
            finished: false,
        }
    }
760
761    fn iter_results(&'iter self) -> MockPrefixIterator {
762        let stripped_prefix = "results/".to_owned();
763        let prefix = "results".to_owned();
764        let iter = self.0.borrow().clone().into_iter();
765        MockPrefixIterator::new(MockIterator { prefix, iter }, stripped_prefix)
766    }
767
768    fn iter_old_diffs(
769        &self,
770        height: BlockHeight,
771        prefix: Option<&Key>,
772    ) -> MockPrefixIterator {
773        // Returns an empty iterator since Mock DB can read only the latest
774        // value for now
775        let stripped_prefix = format!("{}/old/", height.0.raw());
776        let prefix = prefix
777            .map(|k| {
778                if k == &Key::default() {
779                    stripped_prefix.clone()
780                } else {
781                    format!("{stripped_prefix}{k}/")
782                }
783            })
784            .unwrap_or("".to_string());
785        let iter = self.0.borrow().clone().into_iter();
786        MockPrefixIterator::new(MockIterator { prefix, iter }, stripped_prefix)
787    }
788
789    fn iter_new_diffs(
790        &self,
791        height: BlockHeight,
792        prefix: Option<&Key>,
793    ) -> MockPrefixIterator {
794        // Returns an empty iterator since Mock DB can read only the latest
795        // value for now
796        let stripped_prefix = format!("{}/new/", height.0.raw());
797        let prefix = prefix
798            .map(|k| {
799                if k == &Key::default() {
800                    stripped_prefix.clone()
801                } else {
802                    format!("{stripped_prefix}{k}/")
803                }
804            })
805            .unwrap_or("".to_string());
806        let iter = self.0.borrow().clone().into_iter();
807        MockPrefixIterator::new(MockIterator { prefix, iter }, stripped_prefix)
808    }
809
810    fn iter_current_replay_protection(&'iter self) -> Self::PrefixIter {
811        let stripped_prefix = format!(
812            "replay_protection/{}/",
813            replay_protection::current_prefix()
814        );
815        let prefix = stripped_prefix.clone();
816        let iter = self.0.borrow().clone().into_iter();
817        MockPrefixIterator::new(MockIterator { prefix, iter }, stripped_prefix)
818    }
819}
820
/// A prefix iterator base for the [`MockPrefixIterator`].
#[derive(Debug)]
pub struct MockIterator {
    // Raw string prefix that yielded keys must start with
    prefix: String,
    /// The concrete iterator
    pub iter: btree_map::IntoIter<String, Vec<u8>>,
}
828
/// A prefix iterator for the [`MockDB`].
/// Yields `(key, value, gas)` triples with a configured prefix stripped
/// from the keys (see the `Iterator` impl below).
pub type MockPrefixIterator = PrefixIterator<MockIterator>;
831
832impl Iterator for MockIterator {
833    type Item = Result<KVBytes>;
834
835    fn next(&mut self) -> Option<Self::Item> {
836        for (key, val) in &mut self.iter {
837            if key.starts_with(&self.prefix) {
838                return Some(Ok((
839                    Box::from(key.as_bytes()),
840                    Box::from(val.as_slice()),
841                )));
842            }
843        }
844        None
845    }
846}
847
848impl Iterator for PrefixIterator<MockIterator> {
849    type Item = (String, Vec<u8>, Gas);
850
851    /// Returns the next pair and the gas cost
852    fn next(&mut self) -> Option<(String, Vec<u8>, Gas)> {
853        match self.iter.next() {
854            Some(result) => {
855                let (key, val) =
856                    result.expect("Prefix iterator shouldn't fail");
857                let key = String::from_utf8(key.to_vec())
858                    .expect("Cannot convert from bytes to key string");
859                match key.strip_prefix(&self.stripped_prefix) {
860                    Some(k) => {
861                        let gas = k.len() + val.len();
862                        Some((k.to_owned(), val.to_vec(), (gas as u64).into()))
863                    }
864                    None => self.next(),
865                }
866            }
867            None => None,
868        }
869    }
870}
871
/// MockDB pattern iterator
#[derive(Debug)]
pub struct MockPatternIterator {
    // Underlying prefix iterator paired with the regex that keys are
    // matched against
    inner: PatternIterator<MockPrefixIterator>,
    // Set once a non-matching key has been seen; subsequent `next` calls
    // return `None` (see the `Iterator` impl)
    finished: bool,
}
878
impl Iterator for MockPatternIterator {
    type Item = (String, Vec<u8>, Gas);

    /// Returns the next pair and the gas cost
    fn next(&mut self) -> Option<(String, Vec<u8>, Gas)> {
        // Once `finished` is set, the iterator is permanently exhausted
        if self.finished {
            return None;
        }
        loop {
            let next_result = self.inner.iter.next()?;
            if self.inner.pattern.is_match(&next_result.0) {
                return Some(next_result);
            } else {
                // NOTE(review): the first non-matching key marks the iterator
                // as finished for *subsequent* calls, yet this loop keeps
                // scanning for a match within the current call - confirm this
                // early-termination semantic is intended
                self.finished = true;
            }
        }
    }
}
897
// Marker impl only - the mock batch carries no state, as writes are applied
// directly by the `DB` methods.
impl DBWriteBatch for MockDBWriteBatch {}
899
// The unit type acts as a stub `DBUpdateVisitor` for `MockDB`: DB migrations
// aren't supported by the mock, so every method is `unimplemented!`.
impl DBUpdateVisitor for () {
    type DB = crate::mockdb::MockDB;

    fn read(
        &self,
        _db: &Self::DB,
        _key: &Key,
        _cf: &DbColFam,
    ) -> Option<Vec<u8>> {
        unimplemented!()
    }

    fn write(
        &mut self,
        _db: &Self::DB,
        _key: &Key,
        _cf: &DbColFam,
        _value: impl AsRef<[u8]>,
        _persist_diffs: bool,
    ) {
        unimplemented!()
    }

    fn delete(
        &mut self,
        _db: &Self::DB,
        _key: &Key,
        _cf: &DbColFam,
        _persist_diffs: bool,
    ) {
        unimplemented!()
    }

    fn get_pattern(
        &self,
        _db: &Self::DB,
        _pattern: Regex,
    ) -> Vec<(String, Vec<u8>)> {
        unimplemented!()
    }

    fn commit(self, _db: &Self::DB) -> Result<()> {
        unimplemented!()
    }
}