use crate::core::consensus::WEEK_HEIGHT;
use crate::core::core::committed::Committed;
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::pmmr::{
self, Backend, ReadablePMMR, ReadonlyPMMR, RewindablePMMR, VecBackend, PMMR,
};
use crate::core::core::{
Block, BlockHeader, KernelFeatures, Output, OutputIdentifier, Segment, TxKernel,
};
use crate::core::global;
use crate::core::ser::{PMMRable, ProtocolVersion};
use crate::error::Error;
use crate::linked_list::{ListIndex, PruneableListIndex, RewindableListIndex};
use crate::store::{self, Batch, ChainStore};
use crate::txhashset::bitmap_accumulator::{BitmapAccumulator, BitmapChunk};
use crate::txhashset::{RewindableKernelView, UTXOView};
use crate::types::{CommitPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus};
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::{file, secp_static, zip, StopState};
use crate::SyncState;
use croaring::Bitmap;
use grin_store::pmmr::{clean_files_by_prefix, PMMRBackend};
use std::cmp::Ordering;
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;
/// Subdirectory (under the chain data dir) holding all txhashset MMR files.
const TXHASHSET_SUBDIR: &str = "txhashset";
/// Output MMR subdirectory.
const OUTPUT_SUBDIR: &str = "output";
/// Rangeproof MMR subdirectory.
const RANGE_PROOF_SUBDIR: &str = "rangeproof";
/// Kernel MMR subdirectory.
const KERNEL_SUBDIR: &str = "kernel";
/// File name prefix used for txhashset snapshot zip archives.
const TXHASHSET_ZIP: &str = "txhashset_snapshot";
/// An entry to be applied to a PMMR during segment application: either a
/// pruned-subtree hash or a leaf, tagged with its index into the source data
/// vector and its 0-based MMR position. Equality and ordering (see the impls
/// below) compare the MMR position only, ignoring the variant and data index.
#[derive(Eq)]
enum OrderedHashLeafNode {
    // (index into the hashes vector, 0-based MMR position)
    Hash(usize, u64),
    // (index into the leaf data vector, 0-based MMR position)
    Leaf(usize, u64),
}
impl PartialEq for OrderedHashLeafNode {
    /// Two entries are equal when they refer to the same MMR position,
    /// regardless of variant or data index.
    fn eq(&self, other: &Self) -> bool {
        let pos_of = |node: &OrderedHashLeafNode| match node {
            OrderedHashLeafNode::Hash(_, pos0) => *pos0,
            OrderedHashLeafNode::Leaf(_, pos0) => *pos0,
        };
        pos_of(self) == pos_of(other)
    }
}
impl Ord for OrderedHashLeafNode {
    /// Orders entries strictly by their MMR position, regardless of variant
    /// or data index.
    fn cmp(&self, other: &Self) -> Ordering {
        let pos_of = |node: &OrderedHashLeafNode| match node {
            OrderedHashLeafNode::Hash(_, pos0) => *pos0,
            OrderedHashLeafNode::Leaf(_, pos0) => *pos0,
        };
        pos_of(self).cmp(&pos_of(other))
    }
}
impl PartialOrd for OrderedHashLeafNode {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let a_val = match self {
OrderedHashLeafNode::Hash(_, pos0) => pos0,
OrderedHashLeafNode::Leaf(_, pos0) => pos0,
};
let b_val = match other {
OrderedHashLeafNode::Hash(_, pos0) => pos0,
OrderedHashLeafNode::Leaf(_, pos0) => pos0,
};
Some(a_val.cmp(b_val))
}
}
/// Convenience wrapper around a file-backed PMMR plus the current size of
/// the MMR it contains.
pub struct PMMRHandle<T: PMMRable> {
    /// The underlying file-backed PMMR storage.
    pub backend: PMMRBackend<T>,
    /// Current MMR size (total node count, not leaf count).
    pub size: u64,
}

impl<T: PMMRable> PMMRHandle<T> {
    /// Opens (creating the directory first if necessary) a PMMR backend at
    /// `path` and captures its current unpruned size.
    pub fn new<P: AsRef<Path>>(
        path: P,
        prunable: bool,
        version: ProtocolVersion,
        header: Option<&BlockHeader>,
    ) -> Result<PMMRHandle<T>, Error> {
        fs::create_dir_all(&path)?;
        let backend = PMMRBackend::new(&path, prunable, version, header)?;
        let size = backend.unpruned_size();
        Ok(PMMRHandle { backend, size })
    }
}
impl PMMRHandle<BlockHeader> {
    /// Resets the in-memory MMR size to be consistent with the given chain
    /// head, after checking that the header stored at `head.height` actually
    /// hashes to the head hash.
    pub fn init_head(&mut self, head: &Tip) -> Result<(), Error> {
        let head_hash = self.head_hash()?;
        let expected_hash = self.get_header_hash_by_height(head.height)?;
        if head.hash() != expected_hash {
            error!(
                "header PMMR inconsistent: {} vs {} at {}",
                expected_hash,
                head.hash(),
                head.height
            );
            return Err(Error::Other("header PMMR inconsistent".to_string()));
        }
        // The new size is the insertion index of the *next* height, i.e. the
        // MMR grows up to and including the head header's subtree.
        let next_height = head.height + 1;
        let size = pmmr::insertion_to_pmmr_index(next_height);
        debug!(
            "init_head: header PMMR: current head {} at pos {}",
            head_hash, self.size
        );
        debug!(
            "init_head: header PMMR: resetting to {} at pos {} (height {})",
            head.hash(),
            size,
            head.height
        );
        self.size = size;
        Ok(())
    }
    /// Hash of the header at the given height, read directly from the
    /// header MMR's data file.
    pub fn get_header_hash_by_height(&self, height: u64) -> Result<Hash, Error> {
        // NOTE(review): this compares a height against an MMR size; it acts
        // as a cheap upper-bound sanity check (height < size always holds
        // for any valid height) — confirm intent.
        if height >= self.size {
            return Err(Error::InvalidHeaderHeight(height));
        }
        let pos = pmmr::insertion_to_pmmr_index(height);
        let header_pmmr = ReadonlyPMMR::at(&self.backend, self.size);
        if let Some(entry) = header_pmmr.get_data(pos) {
            Ok(entry.hash())
        } else {
            Err(Error::Other("get header hash by height".to_string()))
        }
    }
    /// Hash of the most recently inserted header (the chain head), i.e. the
    /// rightmost leaf of the header MMR.
    pub fn head_hash(&self) -> Result<Hash, Error> {
        if self.size == 0 {
            return Err(Error::Other("MMR empty, no head".to_string()));
        }
        let header_pmmr = ReadonlyPMMR::at(&self.backend, self.size);
        let leaf_pos = pmmr::bintree_rightmost(self.size - 1);
        if let Some(entry) = header_pmmr.get_data(leaf_pos) {
            Ok(entry.hash())
        } else {
            Err(Error::Other("failed to find head hash".to_string()))
        }
    }
    /// Walks headers forward from `from_height`, returning the last header
    /// whose output and kernel MMR sizes are both still within the given
    /// positions; stops (returning that candidate) at the first header that
    /// exceeds either bound.
    pub fn get_first_header_with(
        &self,
        output_pos: u64,
        kernel_pos: u64,
        from_height: u64,
        store: Arc<store::ChainStore>,
    ) -> Option<BlockHeader> {
        // NOTE(review): despite the name, `cur_height` holds a leaf-aligned
        // PMMR position, not a block height — confirm against callers.
        let mut cur_height = pmmr::round_up_to_leaf_pos(from_height);
        let header_pmmr = ReadonlyPMMR::at(&self.backend, self.size);
        let mut candidate: Option<BlockHeader> = None;
        while let Some(header_entry) = header_pmmr.get_data(cur_height) {
            if let Ok(bh) = store.get_block_header(&header_entry.hash()) {
                if bh.output_mmr_size <= output_pos && bh.kernel_mmr_size <= kernel_pos {
                    candidate = Some(bh)
                } else {
                    return candidate;
                }
            }
            cur_height = pmmr::round_up_to_leaf_pos(cur_height + 1);
        }
        // NOTE(review): if the loop exhausts the PMMR, any candidate found
        // so far is discarded and None is returned — confirm this is
        // intentional rather than `return candidate`.
        None
    }
}
pub struct TxHashSet {
output_pmmr_h: PMMRHandle<OutputIdentifier>,
rproof_pmmr_h: PMMRHandle<RangeProof>,
kernel_pmmr_h: PMMRHandle<TxKernel>,
bitmap_accumulator: BitmapAccumulator,
commit_index: Arc<ChainStore>,
}
impl TxHashSet {
    /// Opens all txhashset MMRs from their files under `root_dir`, probing
    /// kernel MMR protocol versions (newest first) until one deserializes
    /// and verifies correctly.
    pub fn open(
        root_dir: String,
        commit_index: Arc<ChainStore>,
        header: Option<&BlockHeader>,
    ) -> Result<TxHashSet, Error> {
        // Output and rangeproof MMRs are prunable, stored at ProtocolVersion(1).
        let output_pmmr_h = PMMRHandle::new(
            Path::new(&root_dir)
                .join(TXHASHSET_SUBDIR)
                .join(OUTPUT_SUBDIR),
            true,
            ProtocolVersion(1),
            header,
        )?;
        let rproof_pmmr_h = PMMRHandle::new(
            Path::new(&root_dir)
                .join(TXHASHSET_SUBDIR)
                .join(RANGE_PROOF_SUBDIR),
            true,
            ProtocolVersion(1),
            header,
        )?;
        // Build the bitmap accumulator over the current output leaf set.
        let bitmap_accumulator = TxHashSet::bitmap_accumulator(&output_pmmr_h)?;
        // The kernel MMR's on-disk protocol version is unknown: try each
        // version in turn, validating by reading and verifying the first
        // kernel under that version.
        let mut maybe_kernel_handle: Option<PMMRHandle<TxKernel>> = None;
        let versions = vec![ProtocolVersion(2), ProtocolVersion(1)];
        for version in versions {
            let handle = PMMRHandle::new(
                Path::new(&root_dir)
                    .join(TXHASHSET_SUBDIR)
                    .join(KERNEL_SUBDIR),
                false, version,
                None,
            )?;
            if handle.size == 0 {
                // Empty MMR: nothing to validate, accept this version.
                debug!(
                    "attempting to open (empty) kernel PMMR using {:?} - SUCCESS",
                    version
                );
                maybe_kernel_handle = Some(handle);
                break;
            }
            // Read the first kernel; a successful signature verification
            // confirms the data deserialized correctly under this version.
            let kernel: Option<TxKernel> = ReadonlyPMMR::at(&handle.backend, 1).get_data(0);
            if let Some(kernel) = kernel {
                if kernel.verify().is_ok() {
                    debug!(
                        "attempting to open kernel PMMR using {:?} - SUCCESS",
                        version
                    );
                    maybe_kernel_handle = Some(handle);
                    break;
                } else {
                    debug!(
                        "attempting to open kernel PMMR using {:?} - FAIL (verify failed)",
                        version
                    );
                }
            } else {
                debug!(
                    "attempting to open kernel PMMR using {:?} - FAIL (read failed)",
                    version
                );
            }
        }
        if let Some(kernel_pmmr_h) = maybe_kernel_handle {
            Ok(TxHashSet {
                output_pmmr_h,
                rproof_pmmr_h,
                kernel_pmmr_h,
                bitmap_accumulator,
                commit_index,
            })
        } else {
            Err(Error::TxHashSetErr(
                "failed to open kernel PMMR".to_string(),
            ))
        }
    }
    /// Rebuilds a bitmap accumulator from the output PMMR's current leaf
    /// index set.
    fn bitmap_accumulator(
        pmmr_h: &PMMRHandle<OutputIdentifier>,
    ) -> Result<BitmapAccumulator, Error> {
        let pmmr = ReadonlyPMMR::at(&pmmr_h.backend, pmmr_h.size);
        let nbits = pmmr::n_leaves(pmmr_h.size);
        let mut bitmap_accumulator = BitmapAccumulator::new();
        bitmap_accumulator.init(&mut pmmr.leaf_idx_iter(0), nbits)?;
        Ok(bitmap_accumulator)
    }
    /// Releases the underlying backend file handles for all three MMRs.
    pub fn release_backend_files(&mut self) {
        self.output_pmmr_h.backend.release_files();
        self.rproof_pmmr_h.backend.release_files();
        self.kernel_pmmr_h.backend.release_files();
    }
    /// Looks up an unspent output by commitment. Returns the output
    /// identifier and its position/height when the index entry is present
    /// and the output PMMR confirms the same commitment at that position;
    /// `None` otherwise.
    pub fn get_unspent(
        &self,
        commit: Commitment,
    ) -> Result<Option<(OutputIdentifier, CommitPos)>, Error> {
        match self.commit_index.get_output_pos_height(&commit) {
            Ok(Some(pos1)) => {
                let output_pmmr: ReadonlyPMMR<'_, OutputIdentifier, _> =
                    ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size);
                // pos1 is 1-based; get_data takes a 0-based position.
                if let Some(out) = output_pmmr.get_data(pos1.pos - 1) {
                    // Guard against a stale index entry pointing at a
                    // different output.
                    if out.commitment() == commit {
                        Ok(Some((out, pos1)))
                    } else {
                        Ok(None)
                    }
                } else {
                    Ok(None)
                }
            }
            Ok(None) => Ok(None),
            Err(e) => Err(Error::StoreErr(e, "txhashset unspent check".to_string())),
        }
    }
    /// The last `distance` outputs inserted into the output MMR.
    pub fn last_n_output(&self, distance: u64) -> Vec<(Hash, OutputIdentifier)> {
        ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size)
            .get_last_n_insertions(distance)
    }
    /// The last `distance` rangeproofs inserted into the rangeproof MMR.
    pub fn last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> {
        ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.size)
            .get_last_n_insertions(distance)
    }
    /// The last `distance` kernels inserted into the kernel MMR.
    pub fn last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> {
        ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size)
            .get_last_n_insertions(distance)
    }
    /// Readonly view of the kernel MMR as of the given header.
    pub fn kernel_pmmr_at(
        &self,
        header: &BlockHeader,
    ) -> ReadonlyPMMR<TxKernel, PMMRBackend<TxKernel>> {
        ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, header.kernel_mmr_size)
    }
    /// Readonly view of the output MMR as of the given header.
    pub fn output_pmmr_at(
        &self,
        header: &BlockHeader,
    ) -> ReadonlyPMMR<OutputIdentifier, PMMRBackend<OutputIdentifier>> {
        ReadonlyPMMR::at(&self.output_pmmr_h.backend, header.output_mmr_size)
    }
    /// Readonly view of the rangeproof MMR as of the given header.
    /// (The rangeproof MMR grows in lockstep with the output MMR, so it is
    /// sized by `output_mmr_size`.)
    pub fn rangeproof_pmmr_at(
        &self,
        header: &BlockHeader,
    ) -> ReadonlyPMMR<RangeProof, PMMRBackend<RangeProof>> {
        ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, header.output_mmr_size)
    }
    /// Fetches a block header from the commit index (chain store).
    pub fn get_block_header(&self, hash: &Hash) -> Result<BlockHeader, Error> {
        Ok(self.commit_index.get_block_header(&hash)?)
    }
    /// Outputs starting at the given PMMR index, up to `max_count` entries
    /// and an optional maximum index. Returns the last index read plus the
    /// outputs.
    pub fn outputs_by_pmmr_index(
        &self,
        start_index: u64,
        max_count: u64,
        max_index: Option<u64>,
    ) -> (u64, Vec<OutputIdentifier>) {
        ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size)
            .elements_from_pmmr_index(start_index, max_count, max_index)
    }
    /// Rangeproofs starting at the given PMMR index (see
    /// `outputs_by_pmmr_index`).
    pub fn rangeproofs_by_pmmr_index(
        &self,
        start_index: u64,
        max_count: u64,
        max_index: Option<u64>,
    ) -> (u64, Vec<RangeProof>) {
        ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.size)
            .elements_from_pmmr_index(start_index, max_count, max_index)
    }
    /// Current size of the output MMR.
    pub fn output_mmr_size(&self) -> u64 {
        self.output_pmmr_h.size
    }
    /// Current size of the kernel MMR.
    pub fn kernel_mmr_size(&self) -> u64 {
        self.kernel_pmmr_h.size
    }
    /// Current size of the rangeproof MMR.
    pub fn rangeproof_mmr_size(&self) -> u64 {
        self.rproof_pmmr_h.size
    }
pub fn find_kernel(
&self,
excess: &Commitment,
min_index: Option<u64>,
max_index: Option<u64>,
) -> Option<(TxKernel, u64)> {
let min_index = min_index.unwrap_or(1);
let max_index = max_index.unwrap_or(self.kernel_pmmr_h.size);
let pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size);
let mut index = max_index + 1;
while index > min_index {
index -= 1;
if let Some(kernel) = pmmr.get_data(index - 1) {
if &kernel.excess == excess {
return Some((kernel, index));
}
}
}
None
}
    /// Roots of the three MMRs plus the bitmap accumulator root, packaged
    /// for header validation.
    pub fn roots(&self) -> Result<TxHashSetRoots, Error> {
        let output_pmmr = ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size);
        let rproof_pmmr = ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.size);
        let kernel_pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size);
        Ok(TxHashSetRoots {
            output_roots: OutputRoots {
                pmmr_root: output_pmmr.root().map_err(|_| Error::InvalidRoot)?,
                bitmap_root: self.bitmap_accumulator.root(),
            },
            rproof_root: rproof_pmmr.root().map_err(|_| Error::InvalidRoot)?,
            kernel_root: kernel_pmmr.root().map_err(|_| Error::InvalidRoot)?,
        })
    }
    /// Position of the output with the given commitment, per the index.
    pub fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
        Ok(self.commit_index.get_output_pos(&commit)?)
    }
    /// Builds a Merkle proof for the output at the indexed position of the
    /// given commitment.
    pub fn merkle_proof(&mut self, commit: Commitment) -> Result<MerkleProof, Error> {
        let pos0 = self.commit_index.get_output_pos(&commit)?;
        PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.size)
            .merkle_proof(pos0)
            .map_err(|_| Error::MerkleProof)
    }
    /// Compacts the output and rangeproof MMR backends up to the horizon
    /// header, removing data for outputs spent before the horizon.
    pub fn compact(
        &mut self,
        horizon_header: &BlockHeader,
        batch: &Batch<'_>,
    ) -> Result<(), Error> {
        debug!("txhashset: starting compaction...");
        let head_header = batch.head_header()?;
        // Positions spent between horizon and head must be retained so a
        // rewind back to the horizon stays possible.
        let rewind_rm_pos = input_pos_to_rewind(&horizon_header, &head_header, batch)?;
        debug!("txhashset: check_compact output mmr backend...");
        self.output_pmmr_h
            .backend
            .check_compact(horizon_header.output_mmr_size, &rewind_rm_pos)?;
        debug!("txhashset: check_compact rangeproof mmr backend...");
        self.rproof_pmmr_h
            .backend
            .check_compact(horizon_header.output_mmr_size, &rewind_rm_pos)?;
        debug!("txhashset: ... compaction finished");
        Ok(())
    }
    /// Rebuilds the recent (NRD) kernel position index starting from the
    /// header two weeks of blocks below the current head.
    pub fn init_recent_kernel_pos_index(
        &self,
        header_pmmr: &PMMRHandle<BlockHeader>,
        batch: &Batch<'_>,
    ) -> Result<(), Error> {
        let head = batch.head()?;
        let cutoff = head.height.saturating_sub(WEEK_HEIGHT * 2);
        let cutoff_hash = header_pmmr.get_header_hash_by_height(cutoff)?;
        let cutoff_header = batch.get_block_header(&cutoff_hash)?;
        self.verify_kernel_pos_index(&cutoff_header, header_pmmr, batch, None, None)
    }
pub fn verify_kernel_pos_index(
&self,
from_header: &BlockHeader,
header_pmmr: &PMMRHandle<BlockHeader>,
batch: &Batch<'_>,
status: Option<Arc<SyncState>>,
stop_state: Option<Arc<StopState>>,
) -> Result<(), Error> {
if !global::is_nrd_enabled() {
return Ok(());
}
let now = Instant::now();
let kernel_index = store::nrd_recent_kernel_index();
kernel_index.clear(batch)?;
let prev_size = if from_header.height == 0 {
0
} else {
let prev_header = batch.get_previous_header(&from_header)?;
prev_header.kernel_mmr_size
};
debug!(
"verify_kernel_pos_index: header: {} at {}, prev kernel_mmr_size: {}",
from_header.hash(),
from_header.height,
prev_size,
);
let kernel_pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size);
let mut current_pos = prev_size + 1;
let mut current_header = from_header.clone();
let mut count = 0;
let total = pmmr::n_leaves(self.kernel_pmmr_h.size);
let mut applied = 0;
while current_pos <= self.kernel_pmmr_h.size {
if pmmr::is_leaf(current_pos - 1) {
if let Some(kernel) = kernel_pmmr.get_data(current_pos - 1) {
match kernel.features {
KernelFeatures::NoRecentDuplicate { .. } => {
while current_pos > current_header.kernel_mmr_size {
let hash = header_pmmr
.get_header_hash_by_height(current_header.height + 1)?;
current_header = batch.get_block_header(&hash)?;
}
let new_pos = CommitPos {
pos: current_pos,
height: current_header.height,
};
apply_kernel_rules(&kernel, new_pos, batch)?;
count += 1;
}
_ => {}
}
}
applied += 1;
if let Some(ref s) = status {
if total % applied == 10000 {
s.on_setup(None, None, Some(applied), Some(total));
}
}
}
if let Some(ref s) = stop_state {
if s.is_stopped() {
return Ok(());
}
}
current_pos += 1;
}
debug!(
"verify_kernel_pos_index: pushed {} entries to the index, took {}s",
count,
now.elapsed().as_secs(),
);
Ok(())
}
    /// Reconciles the output position index with the output PMMR:
    /// first deletes stale index entries that no longer match the PMMR,
    /// then inserts entries for any UTXOs missing from the index, resolving
    /// each one's height by walking headers in output-MMR-size order.
    pub fn init_output_pos_index(
        &self,
        header_pmmr: &PMMRHandle<BlockHeader>,
        batch: &Batch<'_>,
    ) -> Result<(), Error> {
        let now = Instant::now();
        let output_pmmr = ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size);
        // Phase 1: drop index entries whose position or key no longer
        // matches the data actually stored in the PMMR.
        let mut removed_count = 0;
        for (key, pos1) in batch.output_pos_iter()? {
            let pos0 = pos1.pos - 1;
            if let Some(out) = output_pmmr.get_data(pos0) {
                if let Ok(pos0_via_mmr) = batch.get_output_pos(&out.commitment()) {
                    if pos0 == pos0_via_mmr
                        && batch.is_match_output_pos_key(&key, &out.commitment())
                    {
                        continue;
                    }
                }
            }
            batch.delete(&key)?;
            removed_count += 1;
        }
        debug!(
            "init_output_pos_index: removed {} stale index entries",
            removed_count
        );
        // Phase 2: gather every UTXO (commitment + 1-based position) from
        // the PMMR leaf set...
        let mut outputs_pos: Vec<(Commitment, u64)> = vec![];
        for pos0 in output_pmmr.leaf_pos_iter() {
            if let Some(out) = output_pmmr.get_data(pos0) {
                outputs_pos.push((out.commit, 1 + pos0));
            }
        }
        debug!("init_output_pos_index: {} utxos", outputs_pos.len());
        // ...and keep only those with no index entry yet.
        outputs_pos.retain(|x| {
            batch
                .get_output_pos_height(&x.0)
                .map(|p| p.is_none())
                .unwrap_or(true)
        });
        debug!(
            "init_output_pos_index: {} utxos with missing index entries",
            outputs_pos.len()
        );
        if outputs_pos.is_empty() {
            return Ok(());
        }
        // Positions come out of leaf_pos_iter in ascending order, so we can
        // walk heights once and assign each output the first header whose
        // output MMR size covers its position.
        let total_outputs = outputs_pos.len();
        let max_height = batch.head()?.height;
        let mut i = 0;
        for search_height in 0..max_height {
            let hash = header_pmmr.get_header_hash_by_height(search_height + 1)?;
            let h = batch.get_block_header(&hash)?;
            while i < total_outputs {
                let (commit, pos1) = outputs_pos[i];
                if pos1 > h.output_mmr_size {
                    // This output belongs to a later block.
                    break;
                }
                batch.save_output_pos_height(
                    &commit,
                    CommitPos {
                        pos: pos1,
                        height: h.height,
                    },
                )?;
                i += 1;
            }
        }
        debug!(
            "init_output_pos_index: added entries for {} utxos, took {}s",
            total_outputs,
            now.elapsed().as_secs(),
        );
        Ok(())
    }
}
/// Runs `inner` against a writeable extension pair (header extension plus
/// full txhashset extension), then unconditionally discards all changes —
/// both the batch and the MMR backends. Useful for "what-if" validation
/// that must leave no trace.
pub fn extending_readonly<F, T>(
    handle: &mut PMMRHandle<BlockHeader>,
    trees: &mut TxHashSet,
    inner: F,
) -> Result<T, Error>
where
    F: FnOnce(&mut ExtensionPair<'_>, &Batch<'_>) -> Result<T, Error>,
{
    let commit_index = trees.commit_index.clone();
    let batch = commit_index.batch()?;
    trace!("Starting new txhashset (readonly) extension.");
    let head = batch.head()?;
    let header_head = batch.header_head()?;
    let res = {
        let header_pmmr = PMMR::at(&mut handle.backend, handle.size);
        let mut header_extension = HeaderExtension::new(header_pmmr, header_head);
        let mut extension = Extension::new(trees, head);
        let mut extension_pair = ExtensionPair {
            header_extension: &mut header_extension,
            extension: &mut extension,
        };
        inner(&mut extension_pair, &batch)
    };
    trace!("Rollbacking txhashset (readonly) extension.");
    // Discard everything regardless of the closure's result.
    handle.backend.discard();
    trees.output_pmmr_h.backend.discard();
    trees.rproof_pmmr_h.backend.discard();
    trees.kernel_pmmr_h.backend.discard();
    trace!("TxHashSet (readonly) extension done.");
    res
}
/// Runs `inner` against a readonly UTXO view assembled from the header,
/// output and rangeproof MMRs, with a fresh store batch for lookups.
pub fn utxo_view<F, T>(
    handle: &PMMRHandle<BlockHeader>,
    trees: &TxHashSet,
    inner: F,
) -> Result<T, Error>
where
    F: FnOnce(&UTXOView<'_>, &Batch<'_>) -> Result<T, Error>,
{
    let header_pmmr = ReadonlyPMMR::at(&handle.backend, handle.size);
    let output_pmmr = ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.size);
    let rproof_pmmr = ReadonlyPMMR::at(&trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.size);
    let batch = trees.commit_index.batch()?;
    let utxo = UTXOView::new(header_pmmr, output_pmmr, rproof_pmmr);
    inner(&utxo, &batch)
}
/// Runs `inner` against a rewindable view of the kernel MMR positioned at
/// the current head header, with a fresh store batch for lookups.
pub fn rewindable_kernel_view<F, T>(trees: &TxHashSet, inner: F) -> Result<T, Error>
where
    F: FnOnce(&mut RewindableKernelView<'_>, &Batch<'_>) -> Result<T, Error>,
{
    let kernel_pmmr =
        RewindablePMMR::at(&trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.size);
    let batch = trees.commit_index.batch()?;
    let header = batch.head_header()?;
    let mut view = RewindableKernelView::new(kernel_pmmr, header);
    inner(&mut view, &batch)
}
/// Runs `inner` against a writeable extension pair inside a child batch.
/// On error (or when the closure requested rollback) all MMR changes are
/// discarded; otherwise the child batch is committed, the MMR backends are
/// synced, and the handles' sizes and bitmap accumulator are updated.
/// The header PMMR is always discarded (it is managed separately).
pub fn extending<'a, F, T>(
    header_pmmr: &'a mut PMMRHandle<BlockHeader>,
    trees: &'a mut TxHashSet,
    batch: &'a mut Batch<'_>,
    inner: F,
) -> Result<T, Error>
where
    F: FnOnce(&mut ExtensionPair<'_>, &Batch<'_>) -> Result<T, Error>,
{
    let sizes: (u64, u64, u64);
    let res: Result<T, Error>;
    let rollback: bool;
    let bitmap_accumulator: BitmapAccumulator;
    let head = batch.head()?;
    let header_head = batch.header_head()?;
    // Child batch so DB writes can be committed or dropped atomically with
    // the MMR changes.
    let child_batch = batch.child()?;
    {
        trace!("Starting new txhashset extension.");
        let header_pmmr = PMMR::at(&mut header_pmmr.backend, header_pmmr.size);
        let mut header_extension = HeaderExtension::new(header_pmmr, header_head);
        let mut extension = Extension::new(trees, head);
        let mut extension_pair = ExtensionPair {
            header_extension: &mut header_extension,
            extension: &mut extension,
        };
        res = inner(&mut extension_pair, &child_batch);
        // Capture outcome state before the extensions are dropped.
        rollback = extension_pair.extension.rollback;
        sizes = extension_pair.extension.sizes();
        bitmap_accumulator = extension_pair.extension.bitmap_accumulator.clone();
    }
    header_pmmr.backend.discard();
    match res {
        Err(e) => {
            debug!("Error returned, discarding txhashset extension: {}", e);
            trees.output_pmmr_h.backend.discard();
            trees.rproof_pmmr_h.backend.discard();
            trees.kernel_pmmr_h.backend.discard();
            Err(e)
        }
        Ok(r) => {
            if rollback {
                trace!("Rollbacking txhashset extension. sizes {:?}", sizes);
                trees.output_pmmr_h.backend.discard();
                trees.rproof_pmmr_h.backend.discard();
                trees.kernel_pmmr_h.backend.discard();
            } else {
                trace!("Committing txhashset extension. sizes {:?}", sizes);
                // Commit DB first, then persist MMR files and record the
                // new sizes/accumulator on the handles.
                child_batch.commit()?;
                trees.output_pmmr_h.backend.sync()?;
                trees.rproof_pmmr_h.backend.sync()?;
                trees.kernel_pmmr_h.backend.sync()?;
                trees.output_pmmr_h.size = sizes.0;
                trees.rproof_pmmr_h.size = sizes.1;
                trees.kernel_pmmr_h.size = sizes.2;
                trees.bitmap_accumulator = bitmap_accumulator;
            }
            trace!("TxHashSet extension done.");
            Ok(r)
        }
    }
}
/// Runs `inner` against a header extension, then unconditionally discards
/// any MMR changes. The starting head is derived from the MMR's own tip
/// (falling back to a default Tip when the MMR is empty).
pub fn header_extending_readonly<'a, F, T>(
    handle: &'a mut PMMRHandle<BlockHeader>,
    store: &ChainStore,
    inner: F,
) -> Result<T, Error>
where
    F: FnOnce(&mut HeaderExtension<'_>, &Batch<'_>) -> Result<T, Error>,
{
    let batch = store.batch()?;
    let head = match handle.head_hash() {
        Ok(hash) => {
            let header = batch.get_block_header(&hash)?;
            Tip::from_header(&header)
        }
        // Empty MMR: start from a default (genesis-like) tip.
        Err(_) => Tip::default(),
    };
    let pmmr = PMMR::at(&mut handle.backend, handle.size);
    let mut extension = HeaderExtension::new(pmmr, head);
    let res = inner(&mut extension, &batch);
    // Readonly: always throw the MMR changes away.
    handle.backend.discard();
    res
}
/// Runs `inner` against a header extension inside a child batch. On error
/// or requested rollback the MMR changes are discarded; otherwise the child
/// batch is committed, the backend synced, and the handle size updated.
pub fn header_extending<'a, F, T>(
    handle: &'a mut PMMRHandle<BlockHeader>,
    batch: &'a mut Batch<'_>,
    inner: F,
) -> Result<T, Error>
where
    F: FnOnce(&mut HeaderExtension<'_>, &Batch<'_>) -> Result<T, Error>,
{
    let size: u64;
    let res: Result<T, Error>;
    let rollback: bool;
    let child_batch = batch.child()?;
    // Starting head comes from the MMR's own tip; default when empty.
    let head = match handle.head_hash() {
        Ok(hash) => {
            let header = child_batch.get_block_header(&hash)?;
            Tip::from_header(&header)
        }
        Err(_) => Tip::default(),
    };
    {
        let pmmr = PMMR::at(&mut handle.backend, handle.size);
        let mut extension = HeaderExtension::new(pmmr, head);
        res = inner(&mut extension, &child_batch);
        rollback = extension.rollback;
        size = extension.size();
    }
    match res {
        Err(e) => {
            handle.backend.discard();
            Err(e)
        }
        Ok(r) => {
            if rollback {
                handle.backend.discard();
            } else {
                // Commit DB first, then persist the MMR and record its size.
                child_batch.commit()?;
                handle.backend.sync()?;
                handle.size = size;
            }
            Ok(r)
        }
    }
}
/// A writeable extension over the header MMR, tracking the head it has
/// advanced to and whether the caller requested a rollback.
pub struct HeaderExtension<'a> {
    // Head this extension currently represents.
    head: Tip,
    // Writeable header MMR.
    pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
    // When true the caller wants all changes discarded at the end.
    rollback: bool,
}
impl<'a> HeaderExtension<'a> {
    fn new(
        pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
        head: Tip,
    ) -> HeaderExtension<'a> {
        HeaderExtension {
            head,
            pmmr,
            rollback: false,
        }
    }
    /// Hash of the header entry at the given 0-based MMR position.
    fn get_header_hash(&self, pos0: u64) -> Option<Hash> {
        self.pmmr.get_data(pos0).map(|x| x.hash())
    }
    /// The head this extension currently represents.
    pub fn head(&self) -> Tip {
        self.head.clone()
    }
    /// Hash of the header at the given height, via its MMR insertion index.
    pub fn get_header_hash_by_height(&self, height: u64) -> Option<Hash> {
        let pos = pmmr::insertion_to_pmmr_index(height);
        self.get_header_hash(pos)
    }
    /// Full header at the given height, resolved through the batch.
    pub fn get_header_by_height(
        &self,
        height: u64,
        batch: &Batch<'_>,
    ) -> Result<BlockHeader, Error> {
        if let Some(hash) = self.get_header_hash_by_height(height) {
            Ok(batch.get_block_header(&hash)?)
        } else {
            Err(Error::Other("get header by height".to_string()))
        }
    }
    /// Whether the given tip/header lies on the chain this extension
    /// currently represents (same hash at the same height).
    pub fn is_on_current_chain<T: Into<Tip>>(
        &self,
        t: T,
        batch: &Batch<'_>,
    ) -> Result<bool, Error> {
        let t = t.into();
        if t.height > self.head.height {
            return Ok(false);
        }
        let chain_header = self.get_header_by_height(t.height, batch)?;
        Ok(chain_header.hash() == t.hash())
    }
    /// Marks this extension to be discarded rather than committed.
    pub fn force_rollback(&mut self) {
        self.rollback = true;
    }
    /// Appends a header to the MMR and advances the head to it.
    pub fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
        self.pmmr.push(header).map_err(&Error::TxHashSetErr)?;
        self.head = Tip::from_header(header);
        Ok(())
    }
    /// Rewinds the header MMR so that `header` becomes its last entry, and
    /// resets the head accordingly.
    pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
        debug!(
            "Rewind header extension to {} at {} from {} at {}",
            header.hash(),
            header.height,
            self.head.hash(),
            self.head.height,
        );
        // Rewind takes a 1-based position; no leaves are un-removed for the
        // (non-prunable) header MMR, hence the empty bitmap.
        let header_pos = 1 + pmmr::insertion_to_pmmr_index(header.height);
        self.pmmr
            .rewind(header_pos, &Bitmap::new())
            .map_err(&Error::TxHashSetErr)?;
        self.head = Tip::from_header(header);
        Ok(())
    }
    /// Current (unpruned) size of the header MMR.
    pub fn size(&self) -> u64 {
        self.pmmr.unpruned_size()
    }
    /// Current root of the header MMR.
    pub fn root(&self) -> Result<Hash, Error> {
        Ok(self.pmmr.root().map_err(|_| Error::InvalidRoot)?)
    }
    /// Checks the MMR root against the `prev_root` committed to by the
    /// given header (skipped for the genesis header).
    pub fn validate_root(&self, header: &BlockHeader) -> Result<(), Error> {
        if header.height == 0 {
            return Ok(());
        }
        if self.root()? != header.prev_root {
            Err(Error::InvalidRoot)
        } else {
            Ok(())
        }
    }
}
/// A header extension and a full txhashset extension, borrowed together so
/// closures can operate on both consistently.
pub struct ExtensionPair<'a> {
    /// Writeable extension over the header MMR.
    pub header_extension: &'a mut HeaderExtension<'a>,
    /// Writeable extension over the output/rangeproof/kernel MMRs.
    pub extension: &'a mut Extension<'a>,
}
/// A writeable extension over the three txhashset MMRs plus the bitmap
/// accumulator tracking the unspent leaf set.
pub struct Extension<'a> {
    // Head this extension currently represents.
    head: Tip,
    output_pmmr: PMMR<'a, OutputIdentifier, PMMRBackend<OutputIdentifier>>,
    rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
    kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend<TxKernel>>,
    bitmap_accumulator: BitmapAccumulator,
    // Snapshot of the accumulator as a plain bitmap, used during segment
    // application to decide which leaves are unspent.
    bitmap_cache: Bitmap,
    // When true the caller wants all changes discarded at the end.
    rollback: bool,
}
impl<'a> Committed for Extension<'a> {
    /// Inputs are not tracked by the extension itself.
    fn inputs_committed(&self) -> Vec<Commitment> {
        Vec::new()
    }
    /// Commitments of every unspent output leaf in the output MMR.
    fn outputs_committed(&self) -> Vec<Commitment> {
        self.output_pmmr
            .leaf_pos_iter()
            .filter_map(|pos0| self.output_pmmr.get_data(pos0))
            .map(|out| out.commit)
            .collect()
    }
    /// Excess commitments of every kernel leaf in the kernel MMR.
    fn kernels_committed(&self) -> Vec<Commitment> {
        (0..self.kernel_pmmr.unpruned_size())
            .filter(|n| pmmr::is_leaf(*n))
            .filter_map(|n| self.kernel_pmmr.get_data(n))
            .map(|kernel| kernel.excess())
            .collect()
    }
}
impl<'a> Extension<'a> {
    /// Builds a writeable extension over the txhashset's three MMRs,
    /// snapshotting the bitmap accumulator (and its plain-bitmap form).
    fn new(trees: &'a mut TxHashSet, head: Tip) -> Extension<'a> {
        Extension {
            head,
            output_pmmr: PMMR::at(&mut trees.output_pmmr_h.backend, trees.output_pmmr_h.size),
            rproof_pmmr: PMMR::at(&mut trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.size),
            kernel_pmmr: PMMR::at(&mut trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.size),
            bitmap_accumulator: trees.bitmap_accumulator.clone(),
            bitmap_cache: trees
                .bitmap_accumulator
                .as_bitmap()
                .unwrap_or(Bitmap::new()),
            rollback: false,
        }
    }
    /// The head this extension currently represents.
    pub fn head(&self) -> Tip {
        self.head.clone()
    }
    /// Readonly UTXO view combining this extension's output/rangeproof
    /// MMRs with the given header extension's MMR.
    pub fn utxo_view(&'a self, header_ext: &'a HeaderExtension<'a>) -> UTXOView<'a> {
        UTXOView::new(
            header_ext.pmmr.readonly_pmmr(),
            self.output_readonly_pmmr(),
            self.rproof_readonly_pmmr(),
        )
    }
    /// Readonly view of the output MMR.
    pub fn output_readonly_pmmr(
        &self,
    ) -> ReadonlyPMMR<OutputIdentifier, PMMRBackend<OutputIdentifier>> {
        self.output_pmmr.readonly_pmmr()
    }
    /// Clone of the current bitmap accumulator.
    pub fn bitmap_accumulator(&self) -> BitmapAccumulator {
        self.bitmap_accumulator.clone()
    }
    /// Readonly view of the bitmap accumulator's internal PMMR.
    pub fn bitmap_readonly_pmmr(&self) -> ReadonlyPMMR<BitmapChunk, VecBackend<BitmapChunk>> {
        self.bitmap_accumulator.readonly_pmmr()
    }
    /// Readonly view of the rangeproof MMR.
    pub fn rproof_readonly_pmmr(&self) -> ReadonlyPMMR<RangeProof, PMMRBackend<RangeProof>> {
        self.rproof_pmmr.readonly_pmmr()
    }
    /// Clears the prune lists of the output and rangeproof MMRs.
    pub fn reset_prune_lists(&mut self) {
        self.output_pmmr.reset_prune_list();
        self.rproof_pmmr.reset_prune_list();
    }
    /// Applies a full block to the extension: pushes outputs (indexing their
    /// positions), prunes spent inputs (recording the spent index), pushes
    /// kernels, updates the bitmap accumulator over every affected position,
    /// and advances the head.
    pub fn apply_block(
        &mut self,
        b: &Block,
        header_ext: &HeaderExtension<'_>,
        batch: &Batch<'_>,
    ) -> Result<(), Error> {
        let mut affected_pos = vec![];
        // Outputs first so inputs spending outputs of this same block can
        // be validated against the updated view.
        for out in b.outputs() {
            let pos = self.apply_output(out, batch)?;
            affected_pos.push(pos);
            batch.save_output_pos_height(
                &out.commitment(),
                CommitPos {
                    pos,
                    height: b.header.height,
                },
            )?;
        }
        // Resolve each input to the unspent output it spends.
        let spent = self
            .utxo_view(header_ext)
            .validate_inputs(&b.inputs(), batch)?;
        for (out, pos) in &spent {
            self.apply_input(out.commitment(), *pos)?;
            affected_pos.push(pos.pos);
            batch.delete_output_pos_height(&out.commitment())?;
        }
        // Record which positions this block spent, enabling block rewind.
        let spent: Vec<_> = spent.into_iter().map(|(_, pos)| pos).collect();
        batch.save_spent_index(&b.hash(), &spent)?;
        self.apply_kernels(b.kernels(), b.header.height, batch)?;
        self.apply_to_bitmap_accumulator(&affected_pos)?;
        self.head = Tip::from_header(&b.header);
        Ok(())
    }
    /// Refreshes the bitmap accumulator for all chunks touched by the given
    /// (1-based) output MMR positions.
    fn apply_to_bitmap_accumulator(&mut self, output_pos: &[u64]) -> Result<(), Error> {
        // Convert MMR positions to 0-based leaf indices.
        let mut output_idx: Vec<_> = output_pos
            .iter()
            .map(|x| pmmr::n_leaves(*x).saturating_sub(1))
            .collect();
        output_idx.sort_unstable();
        let min_idx = output_idx.first().cloned().unwrap_or(0);
        let size = pmmr::n_leaves(self.output_pmmr.size);
        // Re-derive accumulator chunks from the leaf set, starting at the
        // chunk containing the smallest affected index.
        self.bitmap_accumulator.apply(
            output_idx,
            self.output_pmmr
                .leaf_idx_iter(BitmapAccumulator::chunk_start_idx(min_idx)),
            size,
        )
    }
    /// Replaces the bitmap accumulator (e.g. from a received segment) and
    /// refreshes the cached plain-bitmap form used during segment application.
    pub fn set_bitmap_accumulator(&mut self, accumulator: BitmapAccumulator) {
        self.bitmap_accumulator = accumulator;
        self.bitmap_cache = self.bitmap_accumulator.as_bitmap().unwrap_or(Bitmap::new());
    }
fn apply_input(&mut self, commit: Commitment, pos: CommitPos) -> Result<(), Error> {
match self.output_pmmr.prune(pos.pos - 1) {
Ok(true) => {
self.rproof_pmmr
.prune(pos.pos - 1)
.map_err(Error::TxHashSetErr)?;
Ok(())
}
Ok(false) => Err(Error::AlreadySpent(commit)),
Err(e) => Err(Error::TxHashSetErr(e)),
}
}
    /// Pushes a new output (identifier + rangeproof) onto the MMRs,
    /// rejecting duplicate unspent commitments. Returns the 1-based
    /// position of the new output.
    fn apply_output(&mut self, out: &Output, batch: &Batch<'_>) -> Result<u64, Error> {
        let commit = out.commitment();
        // Duplicate check: an index hit alone is not enough, the MMR must
        // still hold the same commitment at that position (unspent).
        if let Ok(pos0) = batch.get_output_pos(&commit) {
            if let Some(out_mmr) = self.output_pmmr.get_data(pos0) {
                if out_mmr.commitment() == commit {
                    return Err(Error::DuplicateCommitment(commit));
                }
            }
        }
        let output_pos = self
            .output_pmmr
            .push(&out.identifier())
            .map_err(&Error::TxHashSetErr)?;
        let rproof_pos = self
            .rproof_pmmr
            .push(&out.proof())
            .map_err(&Error::TxHashSetErr)?;
        // Sanity: the output and rangeproof MMRs must stay in lockstep.
        {
            if self.output_pmmr.unpruned_size() != self.rproof_pmmr.unpruned_size() {
                return Err(Error::Other(
                    "output vs rproof MMRs different sizes".to_string(),
                ));
            }
            if output_pos != rproof_pos {
                return Err(Error::Other(
                    "output vs rproof MMRs different pos".to_string(),
                ));
            }
        }
        // Convert the 0-based push position to the 1-based convention used
        // by the position index.
        Ok(1 + output_pos)
    }
pub fn update_leaf_sets(&mut self, bitmap: &Bitmap) -> Result<(), Error> {
let flipped = bitmap.flip(0u32..bitmap.maximum().unwrap() + 1);
for spent_pmmr_index in flipped.iter() {
let pos0 = pmmr::insertion_to_pmmr_index(spent_pmmr_index.into());
self.output_pmmr.remove_from_leaf_set(pos0);
self.rproof_pmmr.remove_from_leaf_set(pos0);
}
Ok(())
}
fn sort_pmmr_hashes_and_leaves(
&mut self,
hash_pos: Vec<u64>,
leaf_pos: Vec<u64>,
skip_leaf_position: Option<u64>,
) -> Vec<OrderedHashLeafNode> {
let mut ordered_inserts = vec![];
for (data_index, pos0) in leaf_pos.iter().enumerate() {
if skip_leaf_position == Some(*pos0) {
continue;
}
ordered_inserts.push(OrderedHashLeafNode::Leaf(data_index, *pos0));
}
for (data_index, pos0) in hash_pos.iter().enumerate() {
ordered_inserts.push(OrderedHashLeafNode::Hash(data_index, *pos0));
}
ordered_inserts.sort();
ordered_inserts
}
    /// Applies a PIBD output segment: pruned-subtree hashes and leaves are
    /// inserted in MMR-position order, and leaves marked spent in the
    /// cached bitmap are removed from the leaf set.
    pub fn apply_output_segment(
        &mut self,
        segment: Segment<OutputIdentifier>,
    ) -> Result<(), Error> {
        let (_sid, hash_pos, hashes, leaf_pos, leaf_data, _proof) = segment.parts();
        // Position 0 is skipped: the first leaf is supplied when the MMR is
        // (re)initialized rather than by segments.
        for insert in self.sort_pmmr_hashes_and_leaves(hash_pos, leaf_pos, Some(0)) {
            match insert {
                OrderedHashLeafNode::Hash(idx, pos0) => {
                    if pos0 >= self.output_pmmr.size {
                        if self.output_pmmr.size == 1 {
                            // Single-node MMR: rewind to empty so the pruned
                            // subtree can be pushed at the right position.
                            self.output_pmmr
                                .rewind(0, &Bitmap::new())
                                .map_err(&Error::TxHashSetErr)?;
                        }
                        self.output_pmmr
                            .push_pruned_subtree(hashes[idx], pos0)
                            .map_err(&Error::TxHashSetErr)?;
                    }
                }
                OrderedHashLeafNode::Leaf(idx, pos0) => {
                    // Only append when this is the next expected position.
                    if pos0 == self.output_pmmr.size {
                        self.output_pmmr
                            .push(&leaf_data[idx])
                            .map_err(&Error::TxHashSetErr)?;
                    }
                    // Remove leaves the bitmap marks as spent.
                    let pmmr_index = pmmr::pmmr_leaf_to_insertion_index(pos0);
                    match pmmr_index {
                        Some(i) => {
                            if !self.bitmap_cache.contains(i as u32) {
                                self.output_pmmr.remove_from_leaf_set(pos0);
                            }
                        }
                        None => {}
                    };
                }
            }
        }
        Ok(())
    }
    /// Applies a PIBD rangeproof segment; mirrors `apply_output_segment`
    /// for the rangeproof MMR.
    pub fn apply_rangeproof_segment(&mut self, segment: Segment<RangeProof>) -> Result<(), Error> {
        let (_sid, hash_pos, hashes, leaf_pos, leaf_data, _proof) = segment.parts();
        for insert in self.sort_pmmr_hashes_and_leaves(hash_pos, leaf_pos, Some(0)) {
            match insert {
                OrderedHashLeafNode::Hash(idx, pos0) => {
                    if pos0 >= self.rproof_pmmr.size {
                        if self.rproof_pmmr.size == 1 {
                            self.rproof_pmmr
                                .rewind(0, &Bitmap::new())
                                .map_err(&Error::TxHashSetErr)?;
                        }
                        self.rproof_pmmr
                            .push_pruned_subtree(hashes[idx], pos0)
                            .map_err(&Error::TxHashSetErr)?;
                    }
                }
                OrderedHashLeafNode::Leaf(idx, pos0) => {
                    if pos0 == self.rproof_pmmr.size {
                        self.rproof_pmmr
                            .push(&leaf_data[idx])
                            .map_err(&Error::TxHashSetErr)?;
                    }
                    let pmmr_index = pmmr::pmmr_leaf_to_insertion_index(pos0);
                    match pmmr_index {
                        Some(i) => {
                            if !self.bitmap_cache.contains(i as u32) {
                                self.rproof_pmmr.remove_from_leaf_set(pos0);
                            }
                        }
                        None => {}
                    };
                }
            }
        }
        Ok(())
    }
    /// Pushes a batch of kernels onto the kernel MMR, applying consensus
    /// kernel rules (e.g. NRD relative locks) for each at the given height.
    pub fn apply_kernels(
        &mut self,
        kernels: &[TxKernel],
        height: u64,
        batch: &Batch<'_>,
    ) -> Result<(), Error> {
        for kernel in kernels {
            let pos = self.apply_kernel(kernel)?;
            let commit_pos = CommitPos { pos, height };
            apply_kernel_rules(kernel, commit_pos, batch)?;
        }
        Ok(())
    }
    /// Applies a PIBD kernel segment. The kernel MMR is non-prunable so a
    /// segment may only carry leaves — any hash entry is a protocol error.
    pub fn apply_kernel_segment(&mut self, segment: Segment<TxKernel>) -> Result<(), Error> {
        let (_sid, _hash_pos, _hashes, leaf_pos, leaf_data, _proof) = segment.parts();
        for insert in self.sort_pmmr_hashes_and_leaves(vec![], leaf_pos, Some(0)) {
            match insert {
                OrderedHashLeafNode::Hash(_, _) => {
                    return Err(Error::InvalidSegment(
                        "Kernel PMMR is non-prunable, should not have hash data".to_string(),
                    )
                    .into());
                }
                OrderedHashLeafNode::Leaf(idx, pos0) => {
                    // Only append when this is the next expected position.
                    if pos0 == self.kernel_pmmr.size {
                        self.kernel_pmmr
                            .push(&leaf_data[idx])
                            .map_err(&Error::TxHashSetErr)?;
                    }
                }
            }
        }
        Ok(())
    }
    /// Pushes a single kernel, returning its 1-based MMR position.
    fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<u64, Error> {
        let pos = self
            .kernel_pmmr
            .push(kernel)
            .map_err(&Error::TxHashSetErr)?;
        Ok(1 + pos)
    }
pub fn merkle_proof<T: AsRef<OutputIdentifier>>(
&self,
out_id: T,
batch: &Batch<'_>,
) -> Result<MerkleProof, Error> {
let out_id = out_id.as_ref();
debug!("txhashset: merkle_proof: output: {:?}", out_id.commit);
let pos0 = batch.get_output_pos(&out_id.commit)?;
let merkle_proof = self
.output_pmmr
.merkle_proof(pos0)
.map_err(&Error::TxHashSetErr)?;
Ok(merkle_proof)
}
pub fn snapshot(&mut self, batch: &Batch<'_>) -> Result<(), Error> {
let header = batch.get_block_header(&self.head.last_block_h)?;
self.output_pmmr.snapshot(&header).map_err(Error::Other)?;
self.rproof_pmmr.snapshot(&header).map_err(Error::Other)?;
Ok(())
}
/// Rewind the extension's MMR state to the given header.
///
/// If the target header's height is at or above the current head (e.g. a
/// fork at equal or greater height) we rewind directly to the header's
/// recorded MMR sizes. Otherwise we walk back block by block from the head
/// so each block's spent outputs can be restored along the way.
pub fn rewind(&mut self, header: &BlockHeader, batch: &Batch<'_>) -> Result<(), Error> {
debug!(
"Rewind extension to {} at {} from {} at {}",
header.hash(),
header.height,
self.head.hash(),
self.head.height
);
let head_header = batch.get_block_header(&self.head.hash())?;
if head_header.height <= header.height {
// Direct rewind: no spent outputs to restore; refresh the bitmap
// accumulator for the region at the new output MMR size.
self.rewind_mmrs_to_pos(header.output_mmr_size, header.kernel_mmr_size, &[])?;
self.apply_to_bitmap_accumulator(&[header.output_mmr_size])?;
} else {
// Block-by-block rewind, collecting every affected MMR position so
// the bitmap accumulator can be rebuilt once at the end.
let mut affected_pos = vec![];
let mut current = head_header;
while header.height < current.height {
let block = batch.get_block(&current.hash())?;
let mut affected_pos_single_block = self.rewind_single_block(&block, batch)?;
affected_pos.append(&mut affected_pos_single_block);
current = batch.get_previous_header(&current)?;
}
self.apply_to_bitmap_accumulator(&affected_pos)?;
}
// The extension now reflects the target header.
self.head = Tip::from_header(header);
Ok(())
}
/// Rewind MMRs and index state for a single block, returning the affected
/// MMR positions (spent output positions plus the resulting output MMR
/// size) so the caller can update the bitmap accumulator.
fn rewind_single_block(&mut self, block: &Block, batch: &Batch<'_>) -> Result<Vec<u64>, Error> {
	let header = &block.header;
	let prev_header = batch.get_previous_header(&header)?;
	// Prefer the spent index; fall back to the legacy per-block input
	// bitmap for blocks stored before the index existed.
	let spent = batch.get_spent_index(&header.hash());
	let spent_pos: Vec<_> = if let Ok(ref spent) = spent {
		spent.iter().map(|x| x.pos).collect()
	} else {
		warn!(
			"rewind_single_block: fallback to legacy input bitmap for block {} at {}",
			header.hash(),
			header.height
		);
		let bitmap = batch.get_block_input_bitmap(&header.hash())?;
		bitmap.iter().map(|x| x.into()).collect()
	};
	if header.height == 0 {
		self.rewind_mmrs_to_pos(0, 0, &spent_pos)?;
	} else {
		// Reuse the previous header fetched above instead of reading it
		// from the db a second time.
		self.rewind_mmrs_to_pos(
			prev_header.output_mmr_size,
			prev_header.kernel_mmr_size,
			&spent_pos,
		)?;
	}
	// Include the post-rewind output MMR size so the accumulator also
	// covers the truncated rightmost region.
	let mut affected_pos = spent_pos;
	affected_pos.push(self.output_pmmr.size);
	// Delete output_pos entries for outputs created by this block; some
	// may legitimately be missing, so count and warn rather than fail.
	let mut missing_count = 0;
	for out in block.outputs() {
		if batch.delete_output_pos_height(&out.commitment()).is_err() {
			missing_count += 1;
		}
	}
	if missing_count > 0 {
		warn!(
			"rewind_single_block: {} output_pos entries missing for: {} at {}",
			missing_count,
			header.hash(),
			header.height,
		);
	}
	// Undo NRD kernel index entries introduced by this block.
	if global::is_nrd_enabled() {
		let kernel_index = store::nrd_recent_kernel_index();
		for kernel in block.kernels() {
			if let KernelFeatures::NoRecentDuplicate { .. } = kernel.features {
				kernel_index.rewind(batch, kernel.excess(), prev_header.kernel_mmr_size)?;
			}
		}
	}
	// Outputs spent by this block are unspent again after the rewind:
	// restore their output_pos entries from the rewound output PMMR.
	if let Ok(spent) = spent {
		for pos1 in spent {
			if let Some(out) = self.output_pmmr.get_data(pos1.pos - 1) {
				batch.save_output_pos_height(&out.commitment(), pos1)?;
			}
		}
	}
	Ok(affected_pos)
}
fn rewind_mmrs_to_pos(
&mut self,
output_pos: u64,
kernel_pos: u64,
spent_pos: &[u64],
) -> Result<(), Error> {
let bitmap: Bitmap = spent_pos.iter().map(|x| *x as u32).collect();
self.output_pmmr
.rewind(output_pos, &bitmap)
.map_err(&Error::TxHashSetErr)?;
self.rproof_pmmr
.rewind(output_pos, &bitmap)
.map_err(&Error::TxHashSetErr)?;
self.kernel_pmmr
.rewind(kernel_pos, &Bitmap::new())
.map_err(&Error::TxHashSetErr)?;
Ok(())
}
pub fn roots(&self) -> Result<TxHashSetRoots, Error> {
Ok(TxHashSetRoots {
output_roots: OutputRoots {
pmmr_root: self.output_pmmr.root().map_err(|_| Error::InvalidRoot)?,
bitmap_root: self.bitmap_accumulator.root(),
},
rproof_root: self.rproof_pmmr.root().map_err(|_| Error::InvalidRoot)?,
kernel_root: self.kernel_pmmr.root().map_err(|_| Error::InvalidRoot)?,
})
}
/// Validate the current MMR roots against the given header.
/// The genesis header (height 0) is exempt.
pub fn validate_roots(&self, header: &BlockHeader) -> Result<(), Error> {
	match header.height {
		0 => Ok(()),
		_ => self.roots()?.validate(header),
	}
}
/// Validate the current MMR sizes against the given header.
/// The header's output size is compared against both the output and the
/// rangeproof MMR (note the header provides no separate rangeproof size;
/// the two MMRs are expected to match). Genesis (height 0) is exempt.
pub fn validate_sizes(&self, header: &BlockHeader) -> Result<(), Error> {
	if header.height == 0 {
		return Ok(());
	}
	let expected = (
		header.output_mmr_size,
		header.output_mmr_size,
		header.kernel_mmr_size,
	);
	if self.sizes() == expected {
		Ok(())
	} else {
		Err(Error::InvalidMMRSize)
	}
}
fn validate_mmrs(&self) -> Result<(), Error> {
let now = Instant::now();
if let Err(e) = self.output_pmmr.validate() {
return Err(Error::InvalidTxHashSet(e));
}
if let Err(e) = self.rproof_pmmr.validate() {
return Err(Error::InvalidTxHashSet(e));
}
if let Err(e) = self.kernel_pmmr.validate() {
return Err(Error::InvalidTxHashSet(e));
}
debug!(
"txhashset: validated the output {}, rproof {}, kernel {} mmrs, took {}s",
self.output_pmmr.unpruned_size(),
self.rproof_pmmr.unpruned_size(),
self.kernel_pmmr.unpruned_size(),
now.elapsed().as_secs(),
);
Ok(())
}
/// Validate the overall utxo/kernel sums against the header: the total
/// overage (computed from genesis, accounting for whether genesis carried
/// a kernel) must balance against the header's total kernel offset.
/// Returns the (utxo, kernel) sum commitments.
pub fn validate_kernel_sums(
	&self,
	genesis: &BlockHeader,
	header: &BlockHeader,
) -> Result<(Commitment, Commitment), Error> {
	let start = Instant::now();
	let genesis_had_reward = genesis.kernel_mmr_size > 0;
	let sums = self.verify_kernel_sums(
		header.total_overage(genesis_had_reward),
		header.total_kernel_offset(),
	)?;
	debug!(
		"txhashset: validated total kernel sums, took {}s",
		start.elapsed().as_secs(),
	);
	Ok(sums)
}
/// Full validation of the txhashset state against the given header:
/// MMR integrity, roots, sizes, kernel sums, and - unless
/// `fast_validation` is set - every rangeproof and kernel signature.
///
/// `output_start_pos` lets rangeproof verification resume from an earlier
/// partial run; `stop_state` allows aborting between the expensive phases.
/// Returns the (utxo, kernel) sum commitments.
pub fn validate(
&self,
genesis: &BlockHeader,
fast_validation: bool,
status: &dyn TxHashsetWriteStatus,
output_start_pos: Option<u64>,
_kernel_start_pos: Option<u64>,
header: &BlockHeader,
stop_state: Option<Arc<StopState>>,
) -> Result<(Commitment, Commitment), Error> {
// Cheap structural checks first.
self.validate_mmrs()?;
self.validate_roots(header)?;
self.validate_sizes(header)?;
// At height 0 there is nothing further to sum: both sums are the
// commitment to zero.
if self.head.height == 0 {
let zero_commit = secp_static::commit_to_zero_value();
return Ok((zero_commit, zero_commit));
}
let (output_sum, kernel_sum) = self.validate_kernel_sums(genesis, header)?;
// Expensive per-element checks, skipped under fast validation. The
// helpers return Ok on a stop request, so re-check stop_state after
// each phase to surface Error::Stopped.
if !fast_validation {
self.verify_rangeproofs(
Some(status),
output_start_pos,
None,
false,
stop_state.clone(),
)?;
if let Some(ref s) = stop_state {
if s.is_stopped() {
return Err(Error::Stopped.into());
}
}
self.verify_kernel_signatures(status, stop_state.clone())?;
if let Some(ref s) = stop_state {
if s.is_stopped() {
return Err(Error::Stopped.into());
}
}
}
Ok((output_sum, kernel_sum))
}
/// Flag the extension so its changes are rolled back rather than committed
/// when it completes (the flag is consumed outside this view).
pub fn force_rollback(&mut self) {
self.rollback = true;
}
/// Debug-dump the output PMMR contents (read from file) and its stats to
/// the log.
pub fn dump_output_pmmr(&self) {
debug!("-- outputs --");
self.output_pmmr.dump_from_file(false);
debug!("--");
self.output_pmmr.dump_stats();
debug!("-- end of outputs --");
}
/// Debug-dump the MMRs to the log. In short mode only the output PMMR is
/// dumped; otherwise the rangeproof and kernel PMMRs are included too.
pub fn dump(&self, short: bool) {
debug!("-- outputs --");
self.output_pmmr.dump(short);
if !short {
debug!("-- range proofs --");
self.rproof_pmmr.dump(short);
debug!("-- kernels --");
self.kernel_pmmr.dump(short);
}
}
/// Unpruned sizes of the (output, rangeproof, kernel) MMRs, in that order.
pub fn sizes(&self) -> (u64, u64, u64) {
(
self.output_pmmr.unpruned_size(),
self.rproof_pmmr.unpruned_size(),
self.kernel_pmmr.unpruned_size(),
)
}
/// Verify the signature of every kernel in the kernel MMR, in batches of
/// 5000, reporting progress through `status`.
///
/// Returns Ok(()) on a stop request as well as on success; the caller is
/// expected to inspect `stop_state` afterwards to tell the two apart.
fn verify_kernel_signatures(
&self,
status: &dyn TxHashsetWriteStatus,
stop_state: Option<Arc<StopState>>,
) -> Result<(), Error> {
let now = Instant::now();
const KERNEL_BATCH_SIZE: usize = 5_000;
let mut kern_count = 0;
let total_kernels = pmmr::n_leaves(self.kernel_pmmr.unpruned_size());
let mut tx_kernels: Vec<TxKernel> = Vec::with_capacity(KERNEL_BATCH_SIZE);
// Walk every MMR position; only leaf positions carry kernel data.
for n in 0..self.kernel_pmmr.unpruned_size() {
if pmmr::is_leaf(n) {
let kernel = self
.kernel_pmmr
.get_data(n)
.ok_or_else(|| Error::TxKernelNotFound)?;
tx_kernels.push(kernel);
}
// Flush on a full batch, or on the final position (which may leave
// a partial batch).
if tx_kernels.len() >= KERNEL_BATCH_SIZE || n + 1 >= self.kernel_pmmr.unpruned_size() {
TxKernel::batch_sig_verify(&tx_kernels)?;
kern_count += tx_kernels.len() as u64;
tx_kernels.clear();
status.on_validation_kernels(kern_count, total_kernels);
// Early-out between batches when a stop is requested.
if let Some(ref s) = stop_state {
if s.is_stopped() {
return Ok(());
}
}
debug!(
"txhashset: verify_kernel_signatures: verified {} signatures",
kern_count,
);
}
}
debug!(
"txhashset: verified {} kernel signatures, pmmr size {}, took {}s",
kern_count,
self.kernel_pmmr.unpruned_size(),
now.elapsed().as_secs(),
);
Ok(())
}
/// Verify rangeproofs for unpruned outputs, in batches (default 1000),
/// optionally resuming from `start_pos`.
///
/// Returns the last processed position when stopped early (stop request or
/// `single_iter`), and 0 after a complete run.
fn verify_rangeproofs(
&self,
status: Option<&dyn TxHashsetWriteStatus>,
start_pos: Option<u64>,
batch_size: Option<usize>,
single_iter: bool,
stop_state: Option<Arc<StopState>>,
) -> Result<u64, Error> {
let now = Instant::now();
let batch_size = batch_size.unwrap_or(1_000);
let mut commits: Vec<Commitment> = Vec::with_capacity(batch_size);
let mut proofs: Vec<RangeProof> = Vec::with_capacity(batch_size);
// When resuming, seed the running count with the number of unpruned
// leaves already covered before start_pos so progress reporting is
// continuous across runs.
let mut proof_count = 0;
if let Some(s) = start_pos {
if let Some(i) = pmmr::pmmr_leaf_to_insertion_index(s) {
proof_count = self.output_pmmr.n_unpruned_leaves_to_index(i) as usize;
}
}
let total_rproofs = self.output_pmmr.n_unpruned_leaves();
for pos0 in self.output_pmmr.leaf_pos_iter() {
// Skip positions already verified in a previous run.
if let Some(p) = start_pos {
if pos0 < p {
continue;
}
}
// Output and rangeproof MMRs share positions; both entries must
// exist for an unpruned leaf.
let output = self.output_pmmr.get_data(pos0);
let proof = self.rproof_pmmr.get_data(pos0);
match (output, proof) {
(None, _) => return Err(Error::OutputNotFound),
(_, None) => return Err(Error::RangeproofNotFound),
(Some(output), Some(proof)) => {
commits.push(output.commit);
proofs.push(proof);
}
}
proof_count += 1;
// Verify a full batch, report progress, and honor stop/single-iter
// requests at batch boundaries only.
if proofs.len() >= batch_size {
Output::batch_verify_proofs(&commits, &proofs)?;
commits.clear();
proofs.clear();
debug!(
"txhashset: verify_rangeproofs: verified {} rangeproofs",
proof_count,
);
if let Some(s) = status {
s.on_validation_rproofs(proof_count as u64, total_rproofs);
}
if let Some(ref s) = stop_state {
if s.is_stopped() {
return Ok(pos0);
}
}
if single_iter {
return Ok(pos0);
}
}
}
// Final partial batch, if any.
if !proofs.is_empty() {
Output::batch_verify_proofs(&commits, &proofs)?;
commits.clear();
proofs.clear();
debug!(
"txhashset: verify_rangeproofs: verified {} rangeproofs",
proof_count,
);
}
debug!(
"txhashset: verified {} rangeproofs, pmmr size {}, took {}s",
proof_count,
self.rproof_pmmr.unpruned_size(),
now.elapsed().as_secs(),
);
Ok(0)
}
}
/// Package the txhashset data files for the given header into a zip and
/// return a read handle on it.
///
/// An existing zip for the same header is reused. Otherwise stale snapshot
/// zips older than 24h are cleaned up, the txhashset folder is copied into
/// a temporary folder (so the zip is built from a stable snapshot), and
/// only the files relevant to this header are zipped.
pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
	let txhashset_zip = format!("{}_{}.zip", TXHASHSET_ZIP, header.hash().to_string());
	let txhashset_path = Path::new(&root_dir).join(TXHASHSET_SUBDIR);
	let zip_path = Path::new(&root_dir).join(txhashset_zip);
	// Reuse a previously created zip for this header if one exists.
	if let Ok(zip) = File::open(&zip_path) {
		debug!(
			"zip_read: {} at {}: reusing existing zip file: {:?}",
			header.hash(),
			header.height,
			zip_path
		);
		return Ok(zip);
	}
	// No zip for this header yet: opportunistically clean up old ones.
	let data_dir = Path::new(&root_dir);
	let pattern = format!("{}_", TXHASHSET_ZIP);
	if let Ok(n) = clean_files_by_prefix(data_dir, &pattern, 24 * 60 * 60) {
		debug!(
			"{} zip files have been clean up in folder: {:?}",
			n, data_dir
		);
	}
	// Snapshot the txhashset folder into a temp folder, then zip only the
	// files belonging to this header.
	let temp_txhashset_path = Path::new(&root_dir).join(format!(
		"{}_zip_{}",
		TXHASHSET_SUBDIR,
		header.hash().to_string()
	));
	if temp_txhashset_path.exists() {
		fs::remove_dir_all(&temp_txhashset_path)?;
	}
	file::copy_dir_to(&txhashset_path, &temp_txhashset_path)?;
	let zip_file = File::create(&zip_path)?;
	zip::create_zip(&zip_file, &temp_txhashset_path, file_list(header))?;
	debug!(
		"zip_read: {} at {}: created zip file: {:?}",
		header.hash(),
		header.height,
		zip_path
	);
	let zip_file = File::open(&zip_path)?;
	// Best-effort cleanup of the temp folder. Bug fix: log the temp folder
	// path that failed to remove (previously the zip path was logged).
	if let Err(e) = fs::remove_dir_all(&temp_txhashset_path) {
		warn!(
			"txhashset temp folder: {:?} fail to remove, err: {}",
			temp_txhashset_path.to_str(),
			e
		);
	}
	Ok(zip_file)
}
/// The fixed set of txhashset files to include in a zip for the given
/// header. The leaf-set files are header-specific (their names embed the
/// header hash); the remaining files are shared.
fn file_list(header: &BlockHeader) -> Vec<PathBuf> {
	let mut files: Vec<PathBuf> = [
		"kernel/pmmr_data.bin",
		"kernel/pmmr_hash.bin",
		"output/pmmr_data.bin",
		"output/pmmr_hash.bin",
		"output/pmmr_prun.bin",
		"rangeproof/pmmr_data.bin",
		"rangeproof/pmmr_hash.bin",
		"rangeproof/pmmr_prun.bin",
	]
	.iter()
	.map(|f| PathBuf::from(*f))
	.collect();
	files.push(PathBuf::from(format!(
		"output/pmmr_leaf.bin.{}",
		header.hash()
	)));
	files.push(PathBuf::from(format!(
		"rangeproof/pmmr_leaf.bin.{}",
		header.hash()
	)));
	files
}
/// Extract a received txhashset zip into the txhashset folder under
/// `root_dir`, restoring only the expected files for the given header.
pub fn zip_write(
	root_dir: PathBuf,
	txhashset_data: File,
	header: &BlockHeader,
) -> Result<(), Error> {
	debug!("zip_write on path: {:?}", root_dir);
	let destination = root_dir.join(TXHASHSET_SUBDIR);
	fs::create_dir_all(&destination)?;
	zip::extract_files(txhashset_data, &destination, file_list(header))?;
	Ok(())
}
/// Replace the txhashset folder under `to` with the one under `from`,
/// removing any existing destination folder first.
pub fn txhashset_replace(from: PathBuf, to: PathBuf) -> Result<(), Error> {
	debug!("txhashset_replace: move from {:?} to {:?}", from, to);
	// Best-effort removal of the existing destination (errors are logged
	// inside the helper, not propagated).
	clean_txhashset_folder(&to);
	match fs::rename(from.join(TXHASHSET_SUBDIR), to.join(TXHASHSET_SUBDIR)) {
		Ok(()) => Ok(()),
		Err(e) => {
			error!("hashset_replace fail on {}. err: {}", TXHASHSET_SUBDIR, e);
			Err(Error::TxHashSetErr("txhashset replacing fail".to_string()))
		}
	}
}
/// Remove the txhashset folder under `root_dir` if it exists. Failures are
/// logged but not propagated (best-effort cleanup).
pub fn clean_txhashset_folder(root_dir: &PathBuf) {
	let txhashset_path = root_dir.join(TXHASHSET_SUBDIR);
	if !txhashset_path.exists() {
		return;
	}
	if let Err(e) = fs::remove_dir_all(&txhashset_path) {
		warn!(
			"clean_txhashset_folder: fail on {:?}. err: {}",
			txhashset_path, e
		);
	}
}
/// Union of the input bitmaps of every block after `block_header` up to
/// and including `head_header`, i.e. the output positions spent on that
/// chain segment. Blocks with no stored input bitmap are silently skipped.
fn input_pos_to_rewind(
	block_header: &BlockHeader,
	head_header: &BlockHeader,
	batch: &Batch<'_>,
) -> Result<Bitmap, Error> {
	let mut accumulated = Bitmap::new();
	let mut cursor = head_header.clone();
	while cursor.height > block_header.height {
		if let Ok(block_bitmap) = batch.get_block_input_bitmap(&cursor.hash()) {
			accumulated.or_inplace(&block_bitmap);
		}
		cursor = batch.get_previous_header(&cursor)?;
	}
	Ok(accumulated)
}
/// Enforce kernel-specific consensus rules for a kernel at the given
/// position. Currently only NRD kernels carry extra rules: a
/// NoRecentDuplicate kernel is rejected if a kernel with the same excess
/// exists in the recent-kernel index within `relative_height` blocks;
/// otherwise its position is pushed onto the index. A no-op when NRD is
/// disabled or for any other kernel feature.
fn apply_kernel_rules(kernel: &TxKernel, pos: CommitPos, batch: &Batch<'_>) -> Result<(), Error> {
	if !global::is_nrd_enabled() {
		return Ok(());
	}
	let relative_height = match kernel.features {
		KernelFeatures::NoRecentDuplicate {
			relative_height, ..
		} => relative_height,
		_ => return Ok(()),
	};
	let kernel_index = store::nrd_recent_kernel_index();
	debug!("checking NRD index: {:?}", kernel.excess());
	if let Some(prev) = kernel_index.peek_pos(batch, kernel.excess())? {
		let diff = pos.height.saturating_sub(prev.height);
		debug!(
			"NRD check: {}, {:?}, {:?}",
			pos.height, prev, relative_height
		);
		if diff < relative_height.into() {
			return Err(Error::NRDRelativeHeight);
		}
	}
	debug!(
		"pushing entry to NRD index: {:?}: {:?}",
		kernel.excess(),
		pos,
	);
	kernel_index.push_pos(batch, kernel.excess(), pos)?;
	Ok(())
}