use crate::*;
use snarkvm_algorithms::traits::LoadableMerkleParameters;
use snarkvm_objects::{
errors::{BlockError, StorageError},
traits::Transaction,
Block,
BlockHeader,
BlockHeaderHash,
};
use snarkvm_utilities::{bytes::ToBytes, has_duplicates, to_bytes};
impl<T: Transaction, P: LoadableMerkleParameters> Ledger<T, P> {
/// Stages the storage operations needed to commit a single transaction.
///
/// For each old serial number, each new commitment, and the memorandum, this
/// checks the value is not already stored and queues an `Op::Insert` that
/// records its ledger index. The three index counters are advanced in place,
/// so consecutive calls across a block's transactions share one numbering.
///
/// Returns the queued operations together with the `(commitment, index)`
/// pairs the caller uses to rebuild the commitment Merkle tree.
///
/// Errors with `ExistingSn` / `ExistingCm` / `ExistingMemo` when a duplicate
/// is found, or propagates serialization/storage errors from the lookups.
#[allow(clippy::type_complexity)]
pub(crate) fn commit_transaction(
    &self,
    sn_index: &mut usize,
    cm_index: &mut usize,
    memo_index: &mut usize,
    transaction: &T,
) -> Result<(Vec<Op>, Vec<(T::Commitment, usize)>), StorageError> {
    let old_serial_numbers = transaction.old_serial_numbers();
    let new_commitments = transaction.new_commitments();

    // +1 for the memo op pushed at the end.
    let mut ops = Vec::with_capacity(old_serial_numbers.len() + new_commitments.len() + 1);
    let mut cms = Vec::with_capacity(new_commitments.len());

    for sn in old_serial_numbers {
        let sn_bytes = to_bytes![sn]?;
        if self.get_sn_index(&sn_bytes)?.is_some() {
            // `sn_bytes` is already a `Vec<u8>`; move it instead of cloning.
            return Err(StorageError::ExistingSn(sn_bytes));
        }
        ops.push(Op::Insert {
            col: COL_SERIAL_NUMBER,
            key: sn_bytes,
            value: (*sn_index as u32).to_le_bytes().to_vec(),
        });
        *sn_index += 1;
    }

    for cm in new_commitments {
        let cm_bytes = to_bytes![cm]?;
        if self.get_cm_index(&cm_bytes)?.is_some() {
            return Err(StorageError::ExistingCm(cm_bytes));
        }
        ops.push(Op::Insert {
            col: COL_COMMITMENT,
            key: cm_bytes,
            value: (*cm_index as u32).to_le_bytes().to_vec(),
        });
        cms.push((cm.clone(), *cm_index));
        *cm_index += 1;
    }

    let memo_bytes = to_bytes![transaction.memorandum()]?;
    if self.get_memo_index(&memo_bytes)?.is_some() {
        return Err(StorageError::ExistingMemo(memo_bytes));
    }
    ops.push(Op::Insert {
        col: COL_MEMO,
        key: memo_bytes,
        value: (*memo_index as u32).to_le_bytes().to_vec(),
    });
    *memo_index += 1;

    Ok((ops, cms))
}
/// Inserts a block into storage without committing it to the canonical chain.
///
/// Rejects the block if its hash is already stored or if it contains
/// intra-block duplicate transaction ids, commitment sets, or memos, then
/// atomically writes the transaction locations, the block header, the block
/// transactions, and the parent's child-hash list.
///
/// Errors with `BlockError::BlockExists` for a known hash, with the
/// `Duplicate*` variants for intra-block duplicates, or propagates
/// serialization/storage errors.
pub fn insert_only(&self, block: &Block<T>) -> Result<(), StorageError> {
    let block_hash = block.header.get_hash();
    if self.block_hash_exists(&block_hash) {
        return Err(StorageError::BlockError(BlockError::BlockExists(
            block_hash.to_string(),
        )));
    }

    let mut database_transaction = DatabaseTransaction::new();

    // Gather per-transaction identifiers so intra-block duplicates can be
    // rejected before anything is staged. Note the "serial numbers" vec
    // actually holds transaction ids, mirroring the duplicate check in
    // `commit`.
    let mut transaction_serial_numbers = Vec::with_capacity(block.transactions.0.len());
    let mut transaction_commitments = Vec::with_capacity(block.transactions.0.len());
    let mut transaction_memos = Vec::with_capacity(block.transactions.0.len());
    for transaction in &block.transactions.0 {
        transaction_serial_numbers.push(transaction.transaction_id()?);
        transaction_commitments.push(transaction.new_commitments());
        transaction_memos.push(transaction.memorandum());
    }
    if has_duplicates(transaction_serial_numbers) {
        return Err(StorageError::DuplicateSn);
    }
    if has_duplicates(transaction_commitments) {
        return Err(StorageError::DuplicateCm);
    }
    if has_duplicates(transaction_memos) {
        return Err(StorageError::DuplicateMemo);
    }

    // Record where each transaction lives inside this block.
    for (index, transaction) in block.transactions.0.iter().enumerate() {
        let transaction_location = TransactionLocation {
            index: index as u32,
            // Reuse the hash computed above instead of re-hashing the header.
            block_hash: block_hash.0,
        };
        database_transaction.push(Op::Insert {
            col: COL_TRANSACTION_LOCATION,
            key: transaction.transaction_id()?.to_vec(),
            value: to_bytes![transaction_location]?.to_vec(),
        });
    }

    database_transaction.push(Op::Insert {
        col: COL_BLOCK_HEADER,
        key: block_hash.0.to_vec(),
        value: to_bytes![block.header]?.to_vec(),
    });
    // BUG FIX: this COL_BLOCK_TRANSACTIONS insert was previously pushed
    // twice (an identical op appeared again after the child-hash update);
    // the redundant second push is removed.
    database_transaction.push(Op::Insert {
        col: COL_BLOCK_TRANSACTIONS,
        key: block_hash.0.to_vec(),
        value: to_bytes![block.transactions]?.to_vec(),
    });

    // Register this block as a child of its parent (idempotently).
    let mut child_hashes = self.get_child_block_hashes(&block.header.previous_block_hash)?;
    if !child_hashes.contains(&block_hash) {
        child_hashes.push(block_hash);
        database_transaction.push(Op::Insert {
            col: COL_CHILD_HASHES,
            key: block.header.previous_block_hash.0.to_vec(),
            value: bincode::serialize(&child_hashes)?,
        });
    }

    self.storage.write(database_transaction)?;
    Ok(())
}
/// Commits a previously-stored block to the canonical chain.
///
/// Loads the block by hash, rejects it if already canonical or if it has
/// intra-block duplicates, stages serial-number/commitment/memo index
/// updates for every transaction, advances the best-block metadata and the
/// bidirectional block-locator mappings, rebuilds the commitment Merkle
/// tree, and flushes everything to storage in one batch before bumping the
/// in-memory height.
///
/// Errors with `ExistingCanonBlock` if the hash is already canonical, with
/// the `Duplicate*` variants on intra-block duplicates, or propagates any
/// storage/serialization error.
pub fn commit(&self, block_header_hash: &BlockHeaderHash) -> Result<(), StorageError> {
let block = self.get_block(block_header_hash)?;
// A block may only be committed to the canonical chain once.
if self.is_canon(block_header_hash) {
return Err(StorageError::ExistingCanonBlock(block_header_hash.to_string()));
}
let mut database_transaction = DatabaseTransaction::new();
// Gather per-transaction identifiers so intra-block duplicates can be
// rejected before any state is staged.
let mut transaction_serial_numbers = Vec::with_capacity(block.transactions.0.len());
let mut transaction_commitments = Vec::with_capacity(block.transactions.0.len());
let mut transaction_memos = Vec::with_capacity(block.transactions.0.len());
for transaction in &block.transactions.0 {
// NOTE(review): this vec holds transaction ids, not individual old serial
// numbers; cross-transaction serial-number duplicates are instead caught
// by the `ExistingSn` check inside `commit_transaction` below.
transaction_serial_numbers.push(transaction.transaction_id()?);
transaction_commitments.push(transaction.new_commitments());
transaction_memos.push(transaction.memorandum());
}
if has_duplicates(transaction_serial_numbers) {
return Err(StorageError::DuplicateSn);
}
if has_duplicates(transaction_commitments) {
return Err(StorageError::DuplicateCm);
}
if has_duplicates(transaction_memos) {
return Err(StorageError::DuplicateMemo);
}
// Continue the global index numbering from the current ledger counters.
let mut sn_index = self.current_sn_index()?;
let mut cm_index = self.current_cm_index()?;
let mut memo_index = self.current_memo_index()?;
// Stage every transaction's ops; collect the (commitment, index) pairs
// needed for the Merkle-tree rebuild below.
let mut transaction_cms = vec![];
for transaction in block.transactions.0.iter() {
let (tx_ops, cms) = self.commit_transaction(&mut sn_index, &mut cm_index, &mut memo_index, transaction)?;
database_transaction.push_vec(tx_ops);
transaction_cms.extend(cms);
}
// Persist the advanced counters alongside the per-item inserts.
database_transaction.push(Op::Insert {
col: COL_META,
key: KEY_CURR_SN_INDEX.as_bytes().to_vec(),
value: (sn_index as u32).to_le_bytes().to_vec(),
});
database_transaction.push(Op::Insert {
col: COL_META,
key: KEY_CURR_CM_INDEX.as_bytes().to_vec(),
value: (cm_index as u32).to_le_bytes().to_vec(),
});
database_transaction.push(Op::Insert {
col: COL_META,
key: KEY_CURR_MEMO_INDEX.as_bytes().to_vec(),
value: (memo_index as u32).to_le_bytes().to_vec(),
});
// The genesis block (all-zero parent hash, empty ledger) keeps height 0;
// every other block increments the best block number.
let is_genesis = block.header.previous_block_hash == BlockHeaderHash([0u8; 32])
&& self.get_latest_block_height() == 0
&& self.is_empty();
// The height write-lock is acquired here and held through the storage
// write, so a concurrent commit cannot interleave between reading and
// bumping the height.
let mut height = self.latest_block_height.write();
let mut new_best_block_number = 0;
if !is_genesis {
new_best_block_number = *height + 1;
}
database_transaction.push(Op::Insert {
col: COL_META,
key: KEY_BEST_BLOCK_NUMBER.as_bytes().to_vec(),
value: new_best_block_number.to_le_bytes().to_vec(),
});
// Bidirectional block locator: hash -> number and number -> hash.
database_transaction.push(Op::Insert {
col: COL_BLOCK_LOCATOR,
key: block.header.get_hash().0.to_vec(),
value: new_best_block_number.to_le_bytes().to_vec(),
});
database_transaction.push(Op::Insert {
col: COL_BLOCK_LOCATOR,
key: new_best_block_number.to_le_bytes().to_vec(),
value: block.header.get_hash().0.to_vec(),
});
// Rebuild the commitment tree with this block's commitments and record its
// new root digest. (presumably `build_merkle_tree` folds the new
// commitments into the existing tree state — TODO confirm)
let new_merkle_tree = self.build_merkle_tree(transaction_cms)?;
let new_digest = new_merkle_tree.root();
database_transaction.push(Op::Insert {
col: COL_DIGEST,
key: to_bytes![new_digest]?.to_vec(),
value: new_best_block_number.to_le_bytes().to_vec(),
});
database_transaction.push(Op::Insert {
col: COL_META,
key: KEY_CURR_DIGEST.as_bytes().to_vec(),
value: to_bytes![new_digest]?.to_vec(),
});
// Swap in the new tree, then flush the whole batch in one storage write.
let mut cm_merkle_tree = self.cm_merkle_tree.write();
*cm_merkle_tree = new_merkle_tree;
self.storage.write(database_transaction)?;
// Only bump the in-memory height after storage reports success.
if !is_genesis {
*height += 1;
}
Ok(())
}
/// Stores `block` if it is not yet known, then commits it to the canonical
/// chain.
pub fn insert_and_commit(&self, block: &Block<T>) -> Result<(), StorageError> {
    let block_hash = block.header.get_hash();

    // Insert only when the block is new; commit unconditionally so a block
    // that was previously inserted (e.g. on a side chain) can still be
    // promoted to canon.
    if !self.block_hash_exists(&block_hash) {
        self.insert_only(block)?;
    }

    self.commit(&block_hash)
}
/// Returns `true` if `block_hash` is stored and has a canonical block number.
pub fn is_canon(&self, block_hash: &BlockHeaderHash) -> bool {
    if !self.block_hash_exists(block_hash) {
        return false;
    }
    self.get_block_number(block_hash).is_ok()
}
/// Returns `true` if the parent of `block_header` is on the canonical chain.
pub fn is_previous_block_canon(&self, block_header: &BlockHeader) -> bool {
    let parent_hash = &block_header.previous_block_hash;
    self.is_canon(parent_hash)
}
/// Rolls the canonical chain back to the shared ancestor of `side_chain_path`,
/// but only when the side chain would extend past the current tip.
pub fn revert_for_fork(&self, side_chain_path: &SideChainPath) -> Result<(), StorageError> {
    let current_height = self.get_latest_block_height();

    // No rollback unless the fork is strictly longer than the current chain.
    if side_chain_path.new_block_number <= current_height {
        return Ok(());
    }

    // Pop one canon block per height between the shared ancestor and the tip
    // (an empty range when the ancestor is at or above the tip).
    for _ in side_chain_path.shared_block_number..current_height {
        self.decommit_latest_block()?;
    }

    Ok(())
}
}