ckb_store/db.rs

use crate::StoreSnapshot;
use crate::cache::StoreCache;
use crate::cell::attach_block_cell;
use crate::store::ChainStore;
use crate::transaction::StoreTransaction;
use crate::write_batch::StoreWriteBatch;
use ckb_app_config::StoreConfig;
use ckb_chain_spec::{consensus::Consensus, versionbits::VersionbitsIndexer};
use ckb_db::{
    DBPinnableSlice, RocksDB,
    iter::{DBIter, DBIterator, IteratorMode},
};
use ckb_db_schema::{CHAIN_SPEC_HASH_KEY, Col, MIGRATION_VERSION_KEY};
use ckb_error::{Error, InternalErrorKind};
use ckb_freezer::Freezer;
use ckb_types::{
    core::{BlockExt, EpochExt, HeaderView, TransactionView},
    packed,
    prelude::*,
    utilities::merkle_mountain_range::ChainRootMMR,
};
use std::sync::Arc;

/// A database for the chain store, built on the RocksDB wrapper `RocksDB`
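///
/// Reads go through the `ChainStore` trait, while writes go through
/// `StoreTransaction` or `StoreWriteBatch`. A minimal usage sketch (assumes a
/// `ChainDB` named `chain_db` built elsewhere):
///
/// ```ignore
/// use ckb_store::ChainStore;
///
/// // Read the current tip header through the `ChainStore` trait.
/// let tip = chain_db.get_tip_header();
/// ```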
#[derive(Clone)]
pub struct ChainDB {
    db: RocksDB,
    freezer: Option<Freezer>,
    cache: Arc<StoreCache>,
}

impl ChainStore for ChainDB {
    fn cache(&self) -> Option<&StoreCache> {
        Some(&self.cache)
    }

    fn freezer(&self) -> Option<&Freezer> {
        self.freezer.as_ref()
    }

    fn get(&self, col: Col, key: &[u8]) -> Option<DBPinnableSlice> {
        self.db
            .get_pinned(col, key)
            .expect("db operation should be ok")
    }

    fn get_iter(&self, col: Col, mode: IteratorMode) -> DBIter {
        self.db.iter(col, mode).expect("db operation should be ok")
    }
}

impl VersionbitsIndexer for ChainDB {
    fn block_epoch_index(&self, block_hash: &packed::Byte32) -> Option<packed::Byte32> {
        ChainStore::get_block_epoch_index(self, block_hash)
    }

    fn epoch_ext(&self, index: &packed::Byte32) -> Option<EpochExt> {
        ChainStore::get_epoch_ext(self, index)
    }

    fn block_header(&self, block_hash: &packed::Byte32) -> Option<HeaderView> {
        ChainStore::get_block_header(self, block_hash)
    }

    fn cellbase(&self, block_hash: &packed::Byte32) -> Option<TransactionView> {
        ChainStore::get_cellbase(self, block_hash)
    }
}

impl ChainDB {
    /// Allocate a new ChainDB instance with the given config
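    ///
    /// A minimal sketch (assumes a `ckb_db::RocksDB` handle `db` and a
    /// `StoreConfig` value `config` obtained elsewhere, e.g. from the app config):
    ///
    /// ```ignore
    /// let chain_db = ChainDB::new(db, config);
    /// ```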
    pub fn new(db: RocksDB, config: StoreConfig) -> Self {
        let cache = StoreCache::from_config(config);
        ChainDB {
            db,
            freezer: None,
            cache: Arc::new(cache),
        }
    }

    /// Open a new ChainDB with a freezer instance
    pub fn new_with_freezer(db: RocksDB, freezer: Freezer, config: StoreConfig) -> Self {
        let cache = StoreCache::from_config(config);
        ChainDB {
            db,
            freezer: Some(freezer),
            cache: Arc::new(cache),
        }
    }

    /// Return the inner RocksDB instance
    pub fn db(&self) -> &RocksDB {
        &self.db
    }

    /// Converts self into a `RocksDB`
    pub fn into_inner(self) -> RocksDB {
        self.db
    }

    /// Store the chain spec hash
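    ///
    /// A round-trip sketch (the zero hash below is just a placeholder value):
    ///
    /// ```ignore
    /// let hash = packed::Byte32::zero();
    /// chain_db.put_chain_spec_hash(&hash)?;
    /// assert_eq!(chain_db.get_chain_spec_hash(), Some(hash));
    /// ```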
    pub fn put_chain_spec_hash(&self, hash: &packed::Byte32) -> Result<(), Error> {
        self.db.put_default(CHAIN_SPEC_HASH_KEY, hash.as_slice())
    }

    /// Return the chain spec hash
    pub fn get_chain_spec_hash(&self) -> Option<packed::Byte32> {
        self.db
            .get_pinned_default(CHAIN_SPEC_HASH_KEY)
            .expect("db operation should be ok")
            .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity())
    }

    /// Return the migration version
    pub fn get_migration_version(&self) -> Option<DBPinnableSlice> {
        self.db
            .get_pinned_default(MIGRATION_VERSION_KEY)
            .expect("db operation should be ok")
    }

    /// Begin a `StoreTransaction`; a snapshot is set at the start of the transaction
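    ///
    /// A sketch of the transactional write path (the staged writes in between
    /// use methods on `StoreTransaction`, e.g. as in [`ChainDB::init`]):
    ///
    /// ```ignore
    /// let db_txn = chain_db.begin_transaction();
    /// // ... stage inserts/attaches on `db_txn` ...
    /// db_txn.commit()?;
    /// ```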
    pub fn begin_transaction(&self) -> StoreTransaction {
        StoreTransaction {
            inner: self.db.transaction(),
            freezer: self.freezer.clone(),
            cache: Arc::clone(&self.cache),
        }
    }

    /// Return `StoreSnapshot`
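    ///
    /// The snapshot is a consistent, read-only view served through `ChainStore`,
    /// for example (a sketch):
    ///
    /// ```ignore
    /// let snapshot = chain_db.get_snapshot();
    /// let tip = snapshot.get_tip_header();
    /// ```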
    pub fn get_snapshot(&self) -> StoreSnapshot {
        StoreSnapshot {
            inner: self.db.get_snapshot(),
            freezer: self.freezer.clone(),
            cache: Arc::clone(&self.cache),
        }
    }

    /// Construct `StoreWriteBatch` with default options.
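    ///
    /// A sketch of the batch write path (the staging calls in between are
    /// methods on `StoreWriteBatch`):
    ///
    /// ```ignore
    /// let mut batch = chain_db.new_write_batch();
    /// // ... stage puts/deletes on `batch` ...
    /// chain_db.write(&batch)?;
    /// ```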
    pub fn new_write_batch(&self) -> StoreWriteBatch {
        StoreWriteBatch {
            inner: self.db.new_write_batch(),
        }
    }

    /// Write batch into chain db.
    pub fn write(&self, write_batch: &StoreWriteBatch) -> Result<(), Error> {
        self.db.write(&write_batch.inner)
    }

    /// Write batch into chain db with write option `set_sync = true`
    ///
    /// See [`RocksDB::write_sync`](ckb_db::RocksDB::write_sync).
    pub fn write_sync(&self, write_batch: &StoreWriteBatch) -> Result<(), Error> {
        self.db.write_sync(&write_batch.inner)
    }

    /// Force the data to go through compaction in order to consolidate it
    ///
    /// See [`RocksDB::compact_range`](ckb_db::RocksDB::compact_range).
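    ///
    /// A sketch compacting an entire column (the column constant is only an
    /// illustrative choice; any `Col` from `ckb_db_schema` works):
    ///
    /// ```ignore
    /// chain_db.compact_range(COLUMN_BLOCK_BODY, None, None)?;
    /// ```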
    pub fn compact_range(
        &self,
        col: Col,
        start: Option<&[u8]>,
        end: Option<&[u8]>,
    ) -> Result<(), Error> {
        self.db.compact_range(col, start, end)
    }

    /// Initializes the database with the genesis block and epoch.
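    ///
    /// A bootstrap sketch for a fresh store (`db` and `config` are assumed to be
    /// built as in [`ChainDB::new`], and `consensus` to come from the chain spec):
    ///
    /// ```ignore
    /// let chain_db = ChainDB::new(db, config);
    /// chain_db.init(&consensus)?;
    /// ```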
    pub fn init(&self, consensus: &Consensus) -> Result<(), Error> {
        let genesis = consensus.genesis_block();
        let epoch = consensus.genesis_epoch_ext();
        let db_txn = self.begin_transaction();
        let genesis_hash = genesis.hash();
        let ext = BlockExt {
            received_at: genesis.timestamp(),
            total_difficulty: genesis.difficulty(),
            total_uncles_count: 0,
            verified: Some(true),
            txs_fees: vec![],
            cycles: Some(vec![]),
            txs_sizes: Some(vec![]),
        };

        attach_block_cell(&db_txn, genesis)?;
        let last_block_hash_in_previous_epoch = epoch.last_block_hash_in_previous_epoch();

        db_txn.insert_block(genesis)?;
        db_txn.insert_block_ext(&genesis_hash, &ext)?;
        db_txn.insert_tip_header(&genesis.header())?;
        db_txn.insert_current_epoch_ext(epoch)?;
        db_txn.insert_block_epoch_index(&genesis_hash, &last_block_hash_in_previous_epoch)?;
        db_txn.insert_epoch_ext(&last_block_hash_in_previous_epoch, epoch)?;
        db_txn.attach_block(genesis)?;

        let mut mmr = ChainRootMMR::new(0, &db_txn);
        mmr.push(genesis.digest())
            .map_err(|e| InternalErrorKind::MMR.other(e))?;
        mmr.commit().map_err(|e| InternalErrorKind::MMR.other(e))?;

        db_txn.commit()?;

        Ok(())
    }
}