foundry_fork_db/cache.rs

//! Cache related abstraction

use alloy_chains::Chain;
use alloy_consensus::BlockHeader;
use alloy_hardforks::EthereumHardfork;
use alloy_primitives::{Address, B256, U256};
use alloy_provider::network::TransactionResponse;
use parking_lot::RwLock;
use revm::{
    context::BlockEnv,
    context_interface::block::BlobExcessGasAndPrice,
    primitives::{
        eip4844::{BLOB_BASE_FEE_UPDATE_FRACTION_CANCUN, BLOB_BASE_FEE_UPDATE_FRACTION_PRAGUE},
        map::{AddressHashMap, HashMap},
        KECCAK_EMPTY,
    },
    state::{Account, AccountInfo, AccountStatus},
    DatabaseCommit,
};
use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer};
use std::{
    collections::BTreeSet,
    fs,
    io::{BufWriter, Write},
    path::{Path, PathBuf},
    sync::Arc,
};
use url::Url;

pub type StorageInfo = HashMap<U256, U256>;

/// A shareable Block database
#[derive(Clone, Debug)]
pub struct BlockchainDb {
    /// Contains all the data
    db: Arc<MemDb>,
    /// metadata of the current config
    meta: Arc<RwLock<BlockchainDbMeta>>,
    /// the cache that can be flushed
    cache: Arc<JsonBlockCacheDB>,
}
impl BlockchainDb {
    /// Creates a new instance of the [BlockchainDb].
    ///
    /// If a `cache_path` is provided it attempts to load a previously stored [JsonBlockCacheData]
    /// and will try to use the cached entries it holds.
    ///
    /// This will return a new and empty [MemDb] if
    ///   - `cache_path` is `None`
    ///   - the file the `cache_path` points to does not exist
    ///   - the file contains malformed data or couldn't be read
    ///   - the provided `meta` differs from the [BlockchainDbMeta] stored on disk
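    ///
    /// # Example
    ///
    /// A minimal usage sketch (marked `ignore`): the RPC URL and cache path are
    /// placeholders, and the import path assumes these types are exposed via the
    /// crate's `cache` module.
    ///
    /// ```ignore
    /// use foundry_fork_db::cache::{BlockchainDb, BlockchainDbMeta};
    /// use revm::context::BlockEnv;
    /// use std::path::PathBuf;
    ///
    /// // Describe the fork (block environment + RPC host) ...
    /// let meta = BlockchainDbMeta::new(BlockEnv::default(), "https://rpc.example.com".to_string());
    /// // ... and back it with an on-disk JSON cache.
    /// let db = BlockchainDb::new(meta, Some(PathBuf::from("cache/mainnet/block.json")));
    /// assert!(db.accounts().read().is_empty());
    /// ```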
    pub fn new(meta: BlockchainDbMeta, cache_path: Option<PathBuf>) -> Self {
        Self::new_db(meta, cache_path, false)
    }

    /// Creates a new instance of the [BlockchainDb] and skips the meta comparison check.
    ///
    /// This is useful for offline-start mode, when we don't want to fetch the metadata of `block`.
    ///
    /// If a `cache_path` is provided it attempts to load a previously stored [JsonBlockCacheData]
    /// and will try to use the cached entries it holds.
    ///
    /// This will return a new and empty [MemDb] if
    ///   - `cache_path` is `None`
    ///   - the file the `cache_path` points to does not exist
    ///   - the file contains malformed data or couldn't be read
    ///   - the provided `meta` differs from the [BlockchainDbMeta] stored on disk
    pub fn new_skip_check(meta: BlockchainDbMeta, cache_path: Option<PathBuf>) -> Self {
        Self::new_db(meta, cache_path, true)
    }

    fn new_db(meta: BlockchainDbMeta, cache_path: Option<PathBuf>, skip_check: bool) -> Self {
        trace!(target: "forge::cache", cache=?cache_path, "initialising blockchain db");
        // read cache and check if metadata matches
        let cache = cache_path
            .as_ref()
            .and_then(|p| {
                JsonBlockCacheDB::load(p).ok().filter(|cache| {
                    if skip_check {
                        return true;
                    }
                    let mut existing = cache.meta().write();
                    existing.hosts.extend(meta.hosts.clone());
                    if meta != *existing {
                        warn!(target: "cache", "non-matching block metadata");
                        false
                    } else {
                        true
                    }
                })
            })
            .unwrap_or_else(|| JsonBlockCacheDB::new(Arc::new(RwLock::new(meta)), cache_path));

        Self { db: Arc::clone(cache.db()), meta: Arc::clone(cache.meta()), cache: Arc::new(cache) }
    }

    /// Returns the map that holds the account related info
    pub fn accounts(&self) -> &RwLock<AddressHashMap<AccountInfo>> {
        &self.db.accounts
    }

    /// Returns the map that holds the storage related info
    pub fn storage(&self) -> &RwLock<AddressHashMap<StorageInfo>> {
        &self.db.storage
    }

    /// Returns the map that holds all the block hashes
    pub fn block_hashes(&self) -> &RwLock<HashMap<U256, B256>> {
        &self.db.block_hashes
    }

    /// Returns the Env related metadata
    pub const fn meta(&self) -> &Arc<RwLock<BlockchainDbMeta>> {
        &self.meta
    }

    /// Returns the inner cache
    pub const fn cache(&self) -> &Arc<JsonBlockCacheDB> {
        &self.cache
    }

    /// Returns the underlying storage
    pub const fn db(&self) -> &Arc<MemDb> {
        &self.db
    }
}

/// Relevant identifying markers in the context of [BlockchainDb]
#[derive(Clone, Debug, Default, Eq, Serialize)]
pub struct BlockchainDbMeta {
    /// The chain of the block environment
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub chain: Option<Chain>,
    /// The block environment
    pub block_env: BlockEnv,
    /// All the hosts used to connect to
    pub hosts: BTreeSet<String>,
}

impl BlockchainDbMeta {
    /// Creates a new instance from the given block environment and RPC URL.
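    ///
    /// Only the host portion of `url` is stored; if the URL cannot be parsed, the raw
    /// string is kept instead.
    ///
    /// A minimal sketch (marked `ignore`), assuming the type is reachable via the
    /// crate's `cache` module:
    ///
    /// ```ignore
    /// use foundry_fork_db::cache::BlockchainDbMeta;
    /// use revm::context::BlockEnv;
    ///
    /// let meta = BlockchainDbMeta::new(BlockEnv::default(), "https://eth-mainnet.example.com/v2/key".to_string());
    /// assert!(meta.hosts.contains("eth-mainnet.example.com"));
    /// ```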
    pub fn new(block_env: BlockEnv, url: String) -> Self {
        let host = Url::parse(&url)
            .ok()
            .and_then(|url| url.host().map(|host| host.to_string()))
            .unwrap_or(url);

        Self { chain: None, block_env, hosts: BTreeSet::from([host]) }
    }

    /// Sets the [BlockEnv] of this instance from the provided [alloy_rpc_types::Block], using the
    /// configured [Chain] (if any) to select the blob base fee update fraction.
    pub fn with_block<T: TransactionResponse, H: BlockHeader>(
        mut self,
        block: &alloy_rpc_types::Block<T, H>,
    ) -> Self {
        let blob_base_fee_update_fraction =
            self.chain.map_or(BLOB_BASE_FEE_UPDATE_FRACTION_PRAGUE, |chain| {
                match EthereumHardfork::from_chain_and_timestamp(chain, block.header.timestamp()) {
                    Some(EthereumHardfork::Cancun) => BLOB_BASE_FEE_UPDATE_FRACTION_CANCUN,
                    _ => BLOB_BASE_FEE_UPDATE_FRACTION_PRAGUE,
                }
            });

        self.block_env = BlockEnv {
            number: U256::from(block.header.number()),
            beneficiary: block.header.beneficiary(),
            timestamp: U256::from(block.header.timestamp()),
            difficulty: U256::from(block.header.difficulty()),
            basefee: block.header.base_fee_per_gas().unwrap_or_default(),
            gas_limit: block.header.gas_limit(),
            prevrandao: block.header.mix_hash(),
            blob_excess_gas_and_price: Some(BlobExcessGasAndPrice::new(
                block.header.excess_blob_gas().unwrap_or_default(),
                blob_base_fee_update_fraction,
            )),
        };

        self
    }

    /// Infers the host from the provided URL and adds it to the set of hosts.
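    ///
    /// A brief sketch (marked `ignore`): hosts are recorded for bookkeeping but ignored by
    /// the `PartialEq` impl below, so metas that differ only in their endpoints still
    /// compare equal and keep sharing the same cache.
    ///
    /// ```ignore
    /// use foundry_fork_db::cache::BlockchainDbMeta;
    ///
    /// let a = BlockchainDbMeta::default().with_url("https://eth.example.com");
    /// let b = BlockchainDbMeta::default().with_url("wss://eth.example.com/ws");
    /// assert_eq!(a, b);
    /// assert!(a.hosts.contains("eth.example.com"));
    /// ```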
    pub fn with_url(mut self, url: &str) -> Self {
        let host = Url::parse(url)
            .ok()
            .and_then(|url| url.host().map(|host| host.to_string()))
            .unwrap_or(url.to_string());
        self.hosts.insert(host);
        self
    }

    /// Sets the [Chain] of this instance
    pub fn set_chain(mut self, chain: Chain) -> Self {
        self.chain = Some(chain);
        self
    }

    /// Sets the [BlockEnv] of this instance
    pub fn set_block_env(mut self, block_env: revm::context::BlockEnv) -> Self {
        self.block_env = block_env;
        self
    }
}

// Ignore hosts so the cache isn't invalidated when different endpoints are used, as is commonly
// the case for http vs ws endpoints.
impl PartialEq for BlockchainDbMeta {
    fn eq(&self, other: &Self) -> bool {
        self.block_env == other.block_env
    }
}

impl<'de> Deserialize<'de> for BlockchainDbMeta {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        /// A backwards-compatible representation of [revm::context::BlockEnv]
        ///
        /// This prevents deserialization errors of cache files caused by breaking changes to the
        /// default [revm::context::BlockEnv], for example enabling an optional feature.
        /// By hand-rolling the deserialize impl we can avoid breaking existing cache files.
        struct BlockEnvBackwardsCompat {
            inner: revm::context::BlockEnv,
        }

        impl<'de> Deserialize<'de> for BlockEnvBackwardsCompat {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: Deserializer<'de>,
            {
                let mut value = serde_json::Value::deserialize(deserializer)?;

                // fill in any fields missing from older cache files with their default values
                if let Some(obj) = value.as_object_mut() {
                    let default_value =
                        serde_json::to_value(revm::context::BlockEnv::default()).unwrap();
                    for (key, value) in default_value.as_object().unwrap() {
                        if !obj.contains_key(key) {
                            obj.insert(key.to_string(), value.clone());
                        }
                    }
                }

                let block_env: revm::context::BlockEnv =
                    serde_json::from_value(value).map_err(serde::de::Error::custom)?;
                Ok(Self { inner: block_env })
            }
        }

        // custom deserialize impl to not break existing cache files
        #[derive(Deserialize)]
        struct Meta {
            chain: Option<Chain>,
            block_env: BlockEnvBackwardsCompat,
            /// all the hosts used to connect to
            #[serde(alias = "host")]
            hosts: Hosts,
        }

        #[derive(Deserialize)]
        #[serde(untagged)]
        enum Hosts {
            Multi(BTreeSet<String>),
            Single(String),
        }

        let Meta { chain, block_env, hosts } = Meta::deserialize(deserializer)?;
        Ok(Self {
            chain,
            block_env: block_env.inner,
            hosts: match hosts {
                Hosts::Multi(hosts) => hosts,
                Hosts::Single(host) => BTreeSet::from([host]),
            },
        })
    }
}

/// In-memory cache containing all accounts, storage slots and their values
/// fetched from RPC
#[derive(Debug, Default)]
pub struct MemDb {
    /// Account-related data
    pub accounts: RwLock<AddressHashMap<AccountInfo>>,
    /// Storage-related data
    pub storage: RwLock<AddressHashMap<StorageInfo>>,
    /// All retrieved block hashes
    pub block_hashes: RwLock<HashMap<U256, B256>>,
}

impl MemDb {
    /// Clears all data stored in this db
    pub fn clear(&self) {
        self.accounts.write().clear();
        self.storage.write().clear();
        self.block_hashes.write().clear();
    }

    /// Inserts the account, replacing it if it already exists
    pub fn do_insert_account(&self, address: Address, account: AccountInfo) {
        self.accounts.write().insert(address, account);
    }

    /// The implementation of [DatabaseCommit::commit()]
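    ///
    /// Empty or selfdestructed accounts are removed from both the account and storage
    /// maps, accounts marked as created have their cached storage cleared first, and
    /// storage slots whose present value is zero are dropped; an account with a zero
    /// code hash and no code falls back to [KECCAK_EMPTY].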
    pub fn do_commit(&self, changes: HashMap<Address, Account>) {
        let mut storage = self.storage.write();
        let mut accounts = self.accounts.write();
        for (add, mut acc) in changes {
            if acc.is_empty() || acc.is_selfdestructed() {
                accounts.remove(&add);
                storage.remove(&add);
            } else {
                // insert account
                if let Some(code_hash) = acc
                    .info
                    .code
                    .as_ref()
                    .filter(|code| !code.is_empty())
                    .map(|code| code.hash_slow())
                {
                    acc.info.code_hash = code_hash;
                } else if acc.info.code_hash.is_zero() {
                    acc.info.code_hash = KECCAK_EMPTY;
                }
                accounts.insert(add, acc.info);

                let acc_storage = storage.entry(add).or_default();
                if acc.status.contains(AccountStatus::Created) {
                    acc_storage.clear();
                }
                for (index, value) in acc.storage {
                    if value.present_value().is_zero() {
                        acc_storage.remove(&index);
                    } else {
                        acc_storage.insert(index, value.present_value());
                    }
                }
                if acc_storage.is_empty() {
                    storage.remove(&add);
                }
            }
        }
    }
}

impl Clone for MemDb {
    fn clone(&self) -> Self {
        Self {
            storage: RwLock::new(self.storage.read().clone()),
            accounts: RwLock::new(self.accounts.read().clone()),
            block_hashes: RwLock::new(self.block_hashes.read().clone()),
        }
    }
}

impl DatabaseCommit for MemDb {
    fn commit(&mut self, changes: HashMap<Address, Account>) {
        self.do_commit(changes)
    }
}

/// A DB that stores the cached content in a JSON file
#[derive(Debug)]
pub struct JsonBlockCacheDB {
    /// Where this cache file is stored.
    ///
    /// If this is [None], caching is disabled.
    cache_path: Option<PathBuf>,
    /// The object that's stored in the JSON file
    data: JsonBlockCacheData,
}

impl JsonBlockCacheDB {
    /// Creates a new instance.
    fn new(meta: Arc<RwLock<BlockchainDbMeta>>, cache_path: Option<PathBuf>) -> Self {
        Self { cache_path, data: JsonBlockCacheData { meta, data: Arc::new(Default::default()) } }
    }

    /// Loads the contents of the cache file at `path` and returns the parsed object
    ///
    /// # Errors
    /// This will fail if
    ///   - the `path` does not exist
    ///   - the format does not match [JsonBlockCacheData]
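    ///
    /// A hypothetical call (marked `ignore`); the path is a placeholder:
    ///
    /// ```ignore
    /// let cache = JsonBlockCacheDB::load("cache/mainnet/block.json")?;
    /// assert!(!cache.is_transient());
    /// ```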
    pub fn load(path: impl Into<PathBuf>) -> eyre::Result<Self> {
        let path = path.into();
        trace!(target: "cache", ?path, "reading json cache");
        let contents = std::fs::read_to_string(&path).map_err(|err| {
            warn!(?err, ?path, "Failed to read cache file");
            err
        })?;
        let data = serde_json::from_str(&contents).map_err(|err| {
            warn!(target: "cache", ?err, ?path, "Failed to deserialize cache data");
            err
        })?;
        trace!(target: "cache", ?path, "read json cache");
        Ok(Self { cache_path: Some(path), data })
    }

    /// Returns the [MemDb] it holds access to
    pub const fn db(&self) -> &Arc<MemDb> {
        &self.data.data
    }

    /// Metadata stored alongside the data
    pub const fn meta(&self) -> &Arc<RwLock<BlockchainDbMeta>> {
        &self.data.meta
    }

    /// Returns `true` if this is a transient cache and nothing will be flushed
    pub const fn is_transient(&self) -> bool {
        self.cache_path.is_none()
    }

    /// Flushes the DB to disk if caching is enabled.
    #[instrument(level = "warn", skip_all, fields(path = ?self.cache_path))]
    pub fn flush(&self) {
        let Some(path) = &self.cache_path else { return };
        self.flush_to(path.as_path());
    }

    /// Flushes the DB to a specific file
    pub fn flush_to(&self, cache_path: &Path) {
        let path: &Path = cache_path;

        trace!(target: "cache", "saving json cache");

        if let Some(parent) = path.parent() {
            let _ = fs::create_dir_all(parent);
        }

        let file = match fs::File::create(path) {
            Ok(file) => file,
            Err(e) => return warn!(target: "cache", %e, "Failed to open json cache for writing"),
        };

        let mut writer = BufWriter::new(file);
        if let Err(e) = serde_json::to_writer(&mut writer, &self.data) {
            return warn!(target: "cache", %e, "Failed to write to json cache");
        }
        if let Err(e) = writer.flush() {
            return warn!(target: "cache", %e, "Failed to flush to json cache");
        }

        trace!(target: "cache", "saved json cache");
    }

    /// Returns the cache path.
    pub fn cache_path(&self) -> Option<&Path> {
        self.cache_path.as_deref()
    }
}

/// The data the [JsonBlockCacheDB] can read and flush
///
/// This is serialized to and deserialized from a JSON object with the keys:
/// `["meta", "accounts", "storage", "block_hashes"]`
#[derive(Debug)]
pub struct JsonBlockCacheData {
    pub meta: Arc<RwLock<BlockchainDbMeta>>,
    pub data: Arc<MemDb>,
}

impl Serialize for JsonBlockCacheData {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut map = serializer.serialize_map(Some(4))?;

        map.serialize_entry("meta", &self.meta.read().clone())?;
        map.serialize_entry("accounts", &self.data.accounts.read().clone())?;
        map.serialize_entry("storage", &self.data.storage.read().clone())?;
        map.serialize_entry("block_hashes", &self.data.block_hashes.read().clone())?;

        map.end()
    }
}

impl<'de> Deserialize<'de> for JsonBlockCacheData {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        #[derive(Deserialize)]
        struct Data {
            meta: BlockchainDbMeta,
            accounts: AddressHashMap<AccountInfo>,
            storage: AddressHashMap<HashMap<U256, U256>>,
            block_hashes: HashMap<U256, B256>,
        }

        let Data { meta, accounts, storage, block_hashes } = Data::deserialize(deserializer)?;

        Ok(Self {
            meta: Arc::new(RwLock::new(meta)),
            data: Arc::new(MemDb {
                accounts: RwLock::new(accounts),
                storage: RwLock::new(storage),
                block_hashes: RwLock::new(block_hashes),
            }),
        })
    }
}

/// A type that flushes a `JsonBlockCacheDB` on drop
///
/// This type intentionally does not implement `Clone` since it's intended that there's only one
/// instance that will flush the cache.
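///
/// A brief sketch (marked `ignore`) of the intended drop-to-flush pattern; `meta` and
/// `cache_path` are placeholders:
///
/// ```ignore
/// let db = BlockchainDb::new(meta, Some(cache_path));
/// let _flush_guard = FlushJsonBlockCacheDB(std::sync::Arc::clone(db.cache()));
/// // ... use `db`; when `_flush_guard` is dropped, the JSON cache is written to disk.
/// ```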
#[derive(Debug)]
pub struct FlushJsonBlockCacheDB(pub Arc<JsonBlockCacheDB>);

impl Drop for FlushJsonBlockCacheDB {
    fn drop(&mut self) {
        trace!(target: "fork::cache", "flushing cache");
        self.0.flush();
        trace!(target: "fork::cache", "flushed cache");
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn can_deserialize_cache() {
        let s = r#"{
    "meta": {
        "cfg_env": {
            "chain_id": 1337,
            "perf_analyse_created_bytecodes": "Analyse",
            "limit_contract_code_size": 18446744073709551615,
            "memory_limit": 4294967295,
            "disable_block_gas_limit": false,
            "disable_eip3607": false,
            "disable_base_fee": false
        },
        "block_env": {
            "number": 15547871,
            "coinbase": "0x0000000000000000000000000000000000000000",
            "timestamp": 1663351871,
            "difficulty": "0x0",
            "basefee": 12448539171,
            "gas_limit": 30000000,
            "prevrandao": "0x0000000000000000000000000000000000000000000000000000000000000000"
        },
        "hosts": [
            "eth-mainnet.alchemyapi.io"
        ]
    },
    "accounts": {
        "0xb8ffc3cd6e7cf5a098a1c92f48009765b24088dc": {
            "balance": "0x0",
            "nonce": 10,
            "code_hash": "0x3ac64c95eedf82e5d821696a12daac0e1b22c8ee18a9fd688b00cfaf14550aad",
            "code": {
                "LegacyAnalyzed": {
                    "bytecode": "0x00",
                    "original_len": 0,
                    "jump_table": {
                      "order": "bitvec::order::Lsb0",
                      "head": {
                        "width": 8,
                        "index": 0
                      },
                      "bits": 1,
                      "data": [0]
                    }
                }
            }
        }
    },
    "storage": {
        "0xa354f35829ae975e850e23e9615b11da1b3dc4de": {
            "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e564": "0x5553444320795661756c74000000000000000000000000000000000000000000",
            "0x10": "0x37fd60ff8346",
            "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563": "0xb",
            "0x6": "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48",
            "0x5": "0x36ff5b93162e",
            "0x14": "0x29d635a8e000",
            "0x11": "0x63224c73",
            "0x2": "0x6"
        }
    },
    "block_hashes": {
        "0xed3deb": "0xbf7be3174b261ea3c377b6aba4a1e05d5fae7eee7aab5691087c20cf353e9877",
        "0xed3de9": "0xba1c3648e0aee193e7d00dffe4e9a5e420016b4880455641085a4731c1d32eef",
        "0xed3de8": "0x61d1491c03a9295fb13395cca18b17b4fa5c64c6b8e56ee9cc0a70c3f6cf9855",
        "0xed3de7": "0xb54560b5baeccd18350d56a3bee4035432294dc9d2b7e02f157813e1dee3a0be",
        "0xed3dea": "0x816f124480b9661e1631c6ec9ee39350bda79f0cbfc911f925838d88e3d02e4b"
    }
}"#;

        let cache: JsonBlockCacheData = serde_json::from_str(s).unwrap();
        assert_eq!(cache.data.accounts.read().len(), 1);
        assert_eq!(cache.data.storage.read().len(), 1);
        assert_eq!(cache.data.block_hashes.read().len(), 5);

        let _s = serde_json::to_string(&cache).unwrap();
    }

    #[test]
    fn can_deserialize_cache_post_4844() {
        let s = r#"{
    "meta": {
        "cfg_env": {
            "chain_id": 1,
            "kzg_settings": "Default",
            "perf_analyse_created_bytecodes": "Analyse",
            "limit_contract_code_size": 18446744073709551615,
            "memory_limit": 134217728,
            "disable_block_gas_limit": false,
            "disable_eip3607": true,
            "disable_base_fee": false,
            "optimism": false
        },
        "block_env": {
            "number": 18651580,
            "coinbase": "0x4838b106fce9647bdf1e7877bf73ce8b0bad5f97",
            "timestamp": 1700950019,
            "gas_limit": 30000000,
            "basefee": 26886078239,
            "difficulty": "0xc6b1a299886016dea3865689f8393b9bf4d8f4fe8c0ad25f0058b3569297c057",
            "prevrandao": "0xc6b1a299886016dea3865689f8393b9bf4d8f4fe8c0ad25f0058b3569297c057",
            "blob_excess_gas_and_price": {
                "excess_blob_gas": 0,
                "blob_gasprice": 1
            }
        },
        "hosts": [
            "eth-mainnet.alchemyapi.io"
        ]
    },
    "accounts": {
        "0x4838b106fce9647bdf1e7877bf73ce8b0bad5f97": {
            "balance": "0x8e0c373cfcdfd0eb",
            "nonce": 128912,
            "code_hash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
            "code": {
                "LegacyAnalyzed": {
                    "bytecode": "0x00",
                    "original_len": 0,
                    "jump_table": {
                      "order": "bitvec::order::Lsb0",
                      "head": {
                        "width": 8,
                        "index": 0
                      },
                      "bits": 1,
                      "data": [0]
                    }
                }
            }
        }
    },
    "storage": {},
    "block_hashes": {}
}"#;

        let cache: JsonBlockCacheData = serde_json::from_str(s).unwrap();
        assert_eq!(cache.data.accounts.read().len(), 1);

        let _s = serde_json::to_string(&cache).unwrap();
    }

    #[test]
    fn can_return_cache_path_if_set() {
        // set
        let cache_db = JsonBlockCacheDB::new(
            Arc::new(RwLock::new(BlockchainDbMeta::default())),
            Some(PathBuf::from("/tmp/foo")),
        );
        assert_eq!(Some(Path::new("/tmp/foo")), cache_db.cache_path());

        // unset
        let cache_db =
            JsonBlockCacheDB::new(Arc::new(RwLock::new(BlockchainDbMeta::default())), None);
        assert_eq!(None, cache_db.cache_path());
    }
}