use crate::core::core::committed::Committed;
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::pmmr::{self, ReadonlyPMMR, RewindablePMMR, PMMR};
use crate::core::core::{
Block, BlockHeader, Input, Output, OutputIdentifier, TxKernel, TxKernelEntry,
};
use crate::core::global;
use crate::core::ser::{PMMRIndexHashable, PMMRable};
use crate::error::{Error, ErrorKind};
use crate::store::{Batch, ChainStore};
use crate::txhashset::{RewindableKernelView, UTXOView};
use crate::types::{Tip, TxHashSetRoots, TxHashsetWriteStatus};
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::{file, secp_static, zip};
use croaring::Bitmap;
use grin_store;
use grin_store::pmmr::{PMMRBackend, PMMR_FILES};
use grin_store::types::prune_noop;
use std::collections::HashSet;
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Instant, SystemTime, UNIX_EPOCH};
// Subdirectory / file-name constants for the on-disk layout of the txhashset.
// Note: `&'static` is redundant on `const` items (clippy
// `redundant_static_lifetimes`) so the lifetime is elided here.

/// Subdirectory holding the header MMRs (header_head and sync_head).
const HEADERHASHSET_SUBDIR: &str = "header";
/// Subdirectory holding the output, rangeproof and kernel MMRs.
const TXHASHSET_SUBDIR: &str = "txhashset";
/// Header MMR tracking the current chain head.
const HEADER_HEAD_SUBDIR: &str = "header_head";
/// Header MMR used while syncing.
const SYNC_HEAD_SUBDIR: &str = "sync_head";
/// Output MMR subdirectory.
const OUTPUT_SUBDIR: &str = "output";
/// Rangeproof MMR subdirectory.
const RANGE_PROOF_SUBDIR: &str = "rangeproof";
/// Kernel MMR subdirectory.
const KERNEL_SUBDIR: &str = "kernel";
/// Prefix for txhashset snapshot zip files.
const TXHASHSET_ZIP: &str = "txhashset_snapshot";
/// Convenience wrapper pairing a file-backed PMMR backend with the size
/// (last position) of the MMR it contained when opened or last synced.
struct PMMRHandle<T: PMMRable> {
	// The file-backed MMR storage.
	backend: PMMRBackend<T>,
	// Last MMR position (unpruned size) - kept in sync by the extension
	// machinery after a successful commit.
	last_pos: u64,
}
impl<T: PMMRable> PMMRHandle<T> {
fn new(
root_dir: &str,
sub_dir: &str,
file_name: &str,
prunable: bool,
header: Option<&BlockHeader>,
) -> Result<PMMRHandle<T>, Error> {
let path = Path::new(root_dir).join(sub_dir).join(file_name);
fs::create_dir_all(path.clone())?;
let backend = PMMRBackend::new(path.to_str().unwrap().to_string(), prunable, header)?;
let last_pos = backend.unpruned_size();
Ok(PMMRHandle { backend, last_pos })
}
}
/// The full transaction hash set: the five MMRs required to validate
/// blocks and capture the chain state, plus the commitment -> position
/// index backed by the chain store.
pub struct TxHashSet {
	// Header MMR tracking the main chain of headers ("header_head").
	header_pmmr_h: PMMRHandle<BlockHeader>,
	// Header MMR used during sync ("sync_head").
	sync_pmmr_h: PMMRHandle<BlockHeader>,
	// Output MMR (prunable - spent outputs are pruned away).
	output_pmmr_h: PMMRHandle<Output>,
	// Rangeproof MMR (prunable, kept in lockstep with the output MMR).
	rproof_pmmr_h: PMMRHandle<RangeProof>,
	// Kernel MMR (append-only, never pruned).
	kernel_pmmr_h: PMMRHandle<TxKernel>,
	// Db store providing the output commitment -> MMR position index.
	commit_index: Arc<ChainStore>,
}
impl TxHashSet {
	/// Open an existing or new set of backends for the TxHashSet.
	///
	/// Creates the five underlying MMR handles. Only the output and
	/// rangeproof MMRs are prunable; those two are opened at the state of
	/// the provided `header` (if any), the others ignore it.
	pub fn open(
		root_dir: String,
		commit_index: Arc<ChainStore>,
		header: Option<&BlockHeader>,
	) -> Result<TxHashSet, Error> {
		Ok(TxHashSet {
			header_pmmr_h: PMMRHandle::new(
				&root_dir,
				HEADERHASHSET_SUBDIR,
				HEADER_HEAD_SUBDIR,
				false,
				None,
			)?,
			sync_pmmr_h: PMMRHandle::new(
				&root_dir,
				HEADERHASHSET_SUBDIR,
				SYNC_HEAD_SUBDIR,
				false,
				None,
			)?,
			output_pmmr_h: PMMRHandle::new(
				&root_dir,
				TXHASHSET_SUBDIR,
				OUTPUT_SUBDIR,
				true,
				header,
			)?,
			rproof_pmmr_h: PMMRHandle::new(
				&root_dir,
				TXHASHSET_SUBDIR,
				RANGE_PROOF_SUBDIR,
				true,
				header,
			)?,
			kernel_pmmr_h: PMMRHandle::new(
				&root_dir,
				TXHASHSET_SUBDIR,
				KERNEL_SUBDIR,
				false,
				None,
			)?,
			commit_index,
		})
	}

	/// Check if the provided output is unspent.
	///
	/// Looks the commitment up in the position index, then confirms the
	/// hash stored at that MMR position matches the output identifier
	/// (the hash commits to the position via `hash_with_index`, so a
	/// stale index entry cannot produce a false positive).
	/// Returns the hash and MMR position on success.
	pub fn is_unspent(&self, output_id: &OutputIdentifier) -> Result<(Hash, u64), Error> {
		match self.commit_index.get_output_pos(&output_id.commit) {
			Ok(pos) => {
				let output_pmmr: ReadonlyPMMR<'_, Output, _> =
					ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
				if let Some(hash) = output_pmmr.get_hash(pos) {
					// hash_with_index takes the 0-based index, hence pos - 1.
					if hash == output_id.hash_with_index(pos - 1) {
						Ok((hash, pos))
					} else {
						Err(ErrorKind::TxHashSetErr(format!("txhashset hash mismatch")).into())
					}
				} else {
					Err(ErrorKind::OutputNotFound.into())
				}
			}
			// A missing index entry simply means "no such unspent output".
			Err(grin_store::Error::NotFoundErr(_)) => Err(ErrorKind::OutputNotFound.into()),
			Err(e) => Err(ErrorKind::StoreErr(e, format!("txhashset unspent check")).into()),
		}
	}

	/// The last `distance` outputs inserted into the output MMR.
	pub fn last_n_output(&self, distance: u64) -> Vec<(Hash, OutputIdentifier)> {
		ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos)
			.get_last_n_insertions(distance)
	}

	/// The last `distance` rangeproofs inserted into the rangeproof MMR.
	pub fn last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> {
		ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos)
			.get_last_n_insertions(distance)
	}

	/// The last `distance` kernels inserted into the kernel MMR.
	pub fn last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernelEntry)> {
		ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos)
			.get_last_n_insertions(distance)
	}

	/// Get the header at the specified height: look up the header hash
	/// in the header MMR, then read the full header from the db.
	pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
		// Heights are 0-based while MMR insertions are 1-based, hence +1.
		let pos = pmmr::insertion_to_pmmr_index(height + 1);
		let header_pmmr =
			ReadonlyPMMR::at(&self.header_pmmr_h.backend, self.header_pmmr_h.last_pos);
		if let Some(entry) = header_pmmr.get_data(pos) {
			let header = self.commit_index.get_block_header(&entry.hash())?;
			Ok(header)
		} else {
			Err(ErrorKind::Other(format!("get header by height")).into())
		}
	}

	/// Paged retrieval of outputs by leaf insertion index (used to serve
	/// the txhashset over the api). Returns (highest index, outputs).
	pub fn outputs_by_insertion_index(
		&self,
		start_index: u64,
		max_count: u64,
	) -> (u64, Vec<OutputIdentifier>) {
		ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos)
			.elements_from_insertion_index(start_index, max_count)
	}

	/// Highest output leaf insertion index (number of leaves in the
	/// output MMR).
	pub fn highest_output_insertion_index(&self) -> u64 {
		pmmr::n_leaves(self.output_pmmr_h.last_pos)
	}

	/// Paged retrieval of rangeproofs by leaf insertion index.
	pub fn rangeproofs_by_insertion_index(
		&self,
		start_index: u64,
		max_count: u64,
	) -> (u64, Vec<RangeProof>) {
		ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos)
			.elements_from_insertion_index(start_index, max_count)
	}

	/// Current roots of all four MMRs.
	pub fn roots(&self) -> TxHashSetRoots {
		let header_pmmr =
			ReadonlyPMMR::at(&self.header_pmmr_h.backend, self.header_pmmr_h.last_pos);
		let output_pmmr =
			ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
		let rproof_pmmr =
			ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
		let kernel_pmmr =
			ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
		TxHashSetRoots {
			header_root: header_pmmr.root(),
			output_root: output_pmmr.root(),
			rproof_root: rproof_pmmr.root(),
			kernel_root: kernel_pmmr.root(),
		}
	}

	/// MMR position of the output with the given commitment (from the
	/// db index).
	pub fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
		Ok(self.commit_index.get_output_pos(&commit)?)
	}

	/// Build a Merkle proof for the output at the given commitment.
	// NOTE(review): the unwrap() panics if the commitment is not in the
	// index - callers presumably check existence first; confirm.
	pub fn merkle_proof(&mut self, commit: Commitment) -> Result<MerkleProof, String> {
		let pos = self.commit_index.get_output_pos(&commit).unwrap();
		PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos).merkle_proof(pos)
	}

	/// Compact the prunable (output and rangeproof) MMR data files.
	/// Everything spent at or before the horizon (current height minus
	/// the cut-through horizon) becomes eligible for removal, and the
	/// corresponding output_pos index entries are deleted.
	pub fn compact(&mut self) -> Result<(), Error> {
		let commit_index = self.commit_index.clone();
		let head_header = commit_index.head_header()?;
		let current_height = head_header.height;
		// horizon for compacting is based on current_height
		let horizon = current_height.saturating_sub(global::cut_through_horizon().into());
		let horizon_header = self.get_header_by_height(horizon)?;
		let batch = self.commit_index.batch()?;
		// All positions spent between the horizon and head.
		let rewind_rm_pos = input_pos_to_rewind(&horizon_header, &head_header, &batch)?;
		{
			// Drop the output_pos index entry for every pruned output.
			let clean_output_index = |commit: &[u8]| {
				let _ = batch.delete_output_pos(commit);
			};
			self.output_pmmr_h.backend.check_compact(
				horizon_header.output_mmr_size,
				&rewind_rm_pos,
				clean_output_index,
			)?;
			// NOTE(review): output_mmr_size is reused here - the output and
			// rangeproof MMRs are kept at identical sizes, so this looks
			// intentional.
			self.rproof_pmmr_h.backend.check_compact(
				horizon_header.output_mmr_size,
				&rewind_rm_pos,
				&prune_noop,
			)?;
		}
		batch.commit()?;
		Ok(())
	}
}
/// Run `inner` against a temporary, forced-rollback extension of the
/// txhashset. All changes made by `inner` (both to the MMR backends and
/// the db batch) are unconditionally discarded afterwards, making this
/// safe for read-only validation work.
pub fn extending_readonly<'a, F, T>(trees: &'a mut TxHashSet, inner: F) -> Result<T, Error>
where
	F: FnOnce(&mut Extension<'_>) -> Result<T, Error>,
{
	// Clone the Arc so the batch can borrow from our local copy.
	let index = trees.commit_index.clone();
	let batch = index.batch()?;
	let head_header = batch.head_header()?;

	trace!("Starting new txhashset (readonly) extension.");

	let result;
	{
		let mut ext = Extension::new(trees, &batch, head_header);
		// Guarantee nothing is ever committed from this extension.
		ext.force_rollback();
		result = inner(&mut ext);
	}

	trace!("Rollbacking txhashset (readonly) extension.");

	// Throw away any uncommitted changes in the MMR backends.
	trees.header_pmmr_h.backend.discard();
	trees.output_pmmr_h.backend.discard();
	trees.rproof_pmmr_h.backend.discard();
	trees.kernel_pmmr_h.backend.discard();

	trace!("TxHashSet (readonly) extension done.");
	result
}
/// Run `inner` against a readonly view of the UTXO set built from the
/// current output and header MMRs. Nothing can be mutated through this
/// view; the db batch it holds is dropped (rolled back) on return.
pub fn utxo_view<'a, F, T>(trees: &'a TxHashSet, inner: F) -> Result<T, Error>
where
	F: FnOnce(&UTXOView<'_>) -> Result<T, Error>,
{
	let outputs = ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos);
	let headers = ReadonlyPMMR::at(&trees.header_pmmr_h.backend, trees.header_pmmr_h.last_pos);
	let batch = trees.commit_index.batch()?;
	let view = UTXOView::new(outputs, headers, &batch);
	inner(&view)
}
/// Run `inner` against a rewindable (but non-writeable) view of the
/// kernel MMR, positioned at the current head. The db batch it holds is
/// dropped (rolled back) on return.
pub fn rewindable_kernel_view<'a, F, T>(trees: &'a TxHashSet, inner: F) -> Result<T, Error>
where
	F: FnOnce(&mut RewindableKernelView<'_>) -> Result<T, Error>,
{
	let kernels = RewindablePMMR::at(&trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.last_pos);
	let batch = trees.commit_index.batch()?;
	let head_header = batch.head_header()?;
	let mut view = RewindableKernelView::new(kernels, &batch, head_header);
	inner(&mut view)
}
/// Run `inner` against a writeable extension of the txhashset, backed by
/// a child db batch. On success (and no forced rollback) the child batch
/// is committed, the MMR backends synced to disk and the in-memory sizes
/// updated; on error or rollback all changes are discarded.
pub fn extending<'a, F, T>(
	trees: &'a mut TxHashSet,
	batch: &'a mut Batch<'_>,
	inner: F,
) -> Result<T, Error>
where
	F: FnOnce(&mut Extension<'_>) -> Result<T, Error>,
{
	let sizes: (u64, u64, u64, u64);
	let res: Result<T, Error>;
	let rollback: bool;
	// Head header captured before the extension starts.
	let header = batch.head_header()?;
	// A child batch lets us commit/discard db changes independently of
	// the caller's batch.
	let child_batch = batch.child()?;
	{
		trace!("Starting new txhashset extension.");
		let mut extension = Extension::new(trees, &child_batch, header);
		res = inner(&mut extension);
		// Capture the rollback flag and final sizes before the extension
		// (and its mutable borrows) goes out of scope.
		rollback = extension.rollback;
		sizes = extension.sizes();
	}
	match res {
		Err(e) => {
			debug!("Error returned, discarding txhashset extension: {}", e);
			trees.header_pmmr_h.backend.discard();
			trees.output_pmmr_h.backend.discard();
			trees.rproof_pmmr_h.backend.discard();
			trees.kernel_pmmr_h.backend.discard();
			Err(e)
		}
		Ok(r) => {
			if rollback {
				trace!("Rollbacking txhashset extension. sizes {:?}", sizes);
				trees.header_pmmr_h.backend.discard();
				trees.output_pmmr_h.backend.discard();
				trees.rproof_pmmr_h.backend.discard();
				trees.kernel_pmmr_h.backend.discard();
			} else {
				trace!("Committing txhashset extension. sizes {:?}", sizes);
				// Commit the db batch first, then flush MMR files, then
				// finally adopt the new sizes in memory.
				child_batch.commit()?;
				trees.header_pmmr_h.backend.sync()?;
				trees.output_pmmr_h.backend.sync()?;
				trees.rproof_pmmr_h.backend.sync()?;
				trees.kernel_pmmr_h.backend.sync()?;
				trees.header_pmmr_h.last_pos = sizes.0;
				trees.output_pmmr_h.last_pos = sizes.1;
				trees.rproof_pmmr_h.last_pos = sizes.2;
				trees.kernel_pmmr_h.last_pos = sizes.3;
			}
			trace!("TxHashSet extension done.");
			Ok(r)
		}
	}
}
/// Run `inner` against a writeable extension of the sync-head header MMR
/// (used during header sync). Commit/rollback semantics mirror
/// `extending`: child batch + MMR sync on success, discard otherwise.
pub fn sync_extending<'a, F, T>(
	trees: &'a mut TxHashSet,
	batch: &'a mut Batch<'_>,
	inner: F,
) -> Result<T, Error>
where
	F: FnOnce(&mut HeaderExtension<'_>) -> Result<T, Error>,
{
	let size: u64;
	let res: Result<T, Error>;
	let rollback: bool;
	// This extension starts from the sync head, not the chain head.
	let head = batch.get_sync_head()?;
	let header = batch.get_block_header(&head.last_block_h)?;
	let child_batch = batch.child()?;
	{
		trace!("Starting new txhashset sync_head extension.");
		let pmmr = PMMR::at(&mut trees.sync_pmmr_h.backend, trees.sync_pmmr_h.last_pos);
		let mut extension = HeaderExtension::new(pmmr, &child_batch, header);
		res = inner(&mut extension);
		rollback = extension.rollback;
		size = extension.size();
	}
	match res {
		Err(e) => {
			debug!(
				"Error returned, discarding txhashset sync_head extension: {}",
				e
			);
			trees.sync_pmmr_h.backend.discard();
			Err(e)
		}
		Ok(r) => {
			if rollback {
				trace!("Rollbacking txhashset sync_head extension. size {:?}", size);
				trees.sync_pmmr_h.backend.discard();
			} else {
				trace!("Committing txhashset sync_head extension. size {:?}", size);
				// Db first, then MMR files, then the in-memory size.
				child_batch.commit()?;
				trees.sync_pmmr_h.backend.sync()?;
				trees.sync_pmmr_h.last_pos = size;
			}
			trace!("TxHashSet sync_head extension done.");
			Ok(r)
		}
	}
}
/// Run `inner` against a writeable extension of the main header MMR
/// (header_head), starting from the current chain head. Commit/rollback
/// semantics mirror `extending`.
pub fn header_extending<'a, F, T>(
	trees: &'a mut TxHashSet,
	batch: &'a mut Batch<'_>,
	inner: F,
) -> Result<T, Error>
where
	F: FnOnce(&mut HeaderExtension<'_>) -> Result<T, Error>,
{
	let size: u64;
	let res: Result<T, Error>;
	let rollback: bool;
	// This extension starts from the chain head (cf. sync_extending).
	let head = batch.head()?;
	let header = batch.get_block_header(&head.last_block_h)?;
	let child_batch = batch.child()?;
	{
		trace!("Starting new txhashset header extension.");
		let pmmr = PMMR::at(
			&mut trees.header_pmmr_h.backend,
			trees.header_pmmr_h.last_pos,
		);
		let mut extension = HeaderExtension::new(pmmr, &child_batch, header);
		res = inner(&mut extension);
		rollback = extension.rollback;
		size = extension.size();
	}
	match res {
		Err(e) => {
			debug!(
				"Error returned, discarding txhashset header extension: {}",
				e
			);
			trees.header_pmmr_h.backend.discard();
			Err(e)
		}
		Ok(r) => {
			if rollback {
				trace!("Rollbacking txhashset header extension. size {:?}", size);
				trees.header_pmmr_h.backend.discard();
			} else {
				trace!("Committing txhashset header extension. size {:?}", size);
				// Db first, then MMR files, then the in-memory size.
				child_batch.commit()?;
				trees.header_pmmr_h.backend.sync()?;
				trees.header_pmmr_h.last_pos = size;
			}
			trace!("TxHashSet header extension done.");
			Ok(r)
		}
	}
}
/// A writeable "extension" over a single header MMR, allowing headers to
/// be applied, rewound and validated before deciding to commit or
/// discard the changes.
pub struct HeaderExtension<'a> {
	// The header the extension is currently positioned at.
	header: BlockHeader,
	// The underlying (writeable) header MMR.
	pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
	// When true the caller (sync_extending/header_extending) discards
	// everything instead of committing.
	rollback: bool,
	/// Db batch used by this extension for header lookups and writes.
	pub batch: &'a Batch<'a>,
}
impl<'a> HeaderExtension<'a> {
	fn new(
		pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
		batch: &'a Batch<'_>,
		header: BlockHeader,
	) -> HeaderExtension<'a> {
		HeaderExtension {
			header,
			pmmr,
			rollback: false,
			batch,
		}
	}

	/// Hash of the header stored at the given MMR position (if any).
	fn get_header_hash(&self, pos: u64) -> Option<Hash> {
		self.pmmr.get_data(pos).map(|x| x.hash())
	}

	/// Get the header at the specified height: look its hash up in the
	/// MMR, then read the full header from the db batch.
	pub fn get_header_by_height(&mut self, height: u64) -> Result<BlockHeader, Error> {
		// Heights are 0-based while MMR insertions are 1-based, hence +1.
		let pos = pmmr::insertion_to_pmmr_index(height + 1);
		if let Some(hash) = self.get_header_hash(pos) {
			let header = self.batch.get_block_header(&hash)?;
			Ok(header)
		} else {
			Err(ErrorKind::Other(format!("get header by height")).into())
		}
	}

	/// Compare the provided header to the header at the same height in
	/// this extension to determine whether it lies on the current chain.
	pub fn is_on_current_chain(&mut self, header: &BlockHeader) -> Result<(), Error> {
		let chain_header = self.get_header_by_height(header.height)?;
		if chain_header.hash() == header.hash() {
			Ok(())
		} else {
			Err(ErrorKind::Other(format!("not on current chain")).into())
		}
	}

	/// Force a rollback of this extension when it completes.
	pub fn force_rollback(&mut self) {
		self.rollback = true;
	}

	/// Apply a new header: push it onto the MMR and make it the
	/// extension's current header. Returns the new MMR root.
	pub fn apply_header(&mut self, header: &BlockHeader) -> Result<Hash, Error> {
		self.pmmr.push(header).map_err(&ErrorKind::TxHashSetErr)?;
		self.header = header.clone();
		Ok(self.root())
	}

	/// Rewind the header extension to the specified header.
	pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
		debug!(
			"Rewind header extension to {} at {}",
			header.hash(),
			header.height
		);
		let header_pos = pmmr::insertion_to_pmmr_index(header.height + 1);
		// Nothing is ever pruned from the header MMR so the removed
		// positions bitmap is always empty.
		self.pmmr
			.rewind(header_pos, &Bitmap::create())
			.map_err(&ErrorKind::TxHashSetErr)?;
		self.header = header.clone();
		Ok(())
	}

	/// Truncate the extension (rewind to pos 0, i.e. an empty MMR).
	pub fn truncate(&mut self) -> Result<(), Error> {
		debug!("Truncating header extension.");
		self.pmmr
			.rewind(0, &Bitmap::create())
			.map_err(&ErrorKind::TxHashSetErr)?;
		Ok(())
	}

	/// Current (unpruned) size of the underlying MMR.
	pub fn size(&self) -> u64 {
		self.pmmr.unpruned_size()
	}

	/// Rebuild the header extension from scratch: truncate, apply the
	/// genesis header, then re-apply every header on the chain up to the
	/// provided head, validating each prev_root along the way.
	pub fn rebuild(&mut self, head: &Tip, genesis: &BlockHeader) -> Result<(), Error> {
		debug!(
			"About to rebuild header extension from {:?} to {:?}.",
			genesis.hash(),
			head.last_block_h,
		);
		// Walk backwards from head down to (but excluding) genesis,
		// collecting header hashes, then reverse into chain order.
		let mut header_hashes = vec![];
		let mut current = self.batch.get_block_header(&head.last_block_h)?;
		while current.height > 0 {
			header_hashes.push(current.hash());
			// Fixed: this line previously read "¤t" - mojibake
			// (HTML entity corruption) of "&current".
			current = self.batch.get_previous_header(&current)?;
		}
		header_hashes.reverse();
		self.truncate()?;
		self.apply_header(&genesis)?;
		if header_hashes.len() > 0 {
			debug!(
				"Re-applying {} headers to extension, from {:?} to {:?}.",
				header_hashes.len(),
				header_hashes.first().unwrap(),
				header_hashes.last().unwrap(),
			);
			for h in header_hashes {
				let header = self.batch.get_block_header(&h)?;
				self.validate_root(&header)?;
				self.apply_header(&header)?;
			}
		}
		Ok(())
	}

	/// Root of the underlying header MMR.
	pub fn root(&self) -> Hash {
		self.pmmr.root()
	}

	/// The root of the header MMR *before* applying `header` must match
	/// that header's prev_root. Genesis (height 0) has no previous root.
	pub fn validate_root(&self, header: &BlockHeader) -> Result<(), Error> {
		if header.height == 0 {
			return Ok(());
		}
		if self.root() != header.prev_root {
			Err(ErrorKind::InvalidRoot.into())
		} else {
			Ok(())
		}
	}
}
/// A writeable "extension" over the full txhashset (all four MMRs),
/// allowing blocks to be applied, rewound and validated before deciding
/// to commit or discard the changes.
pub struct Extension<'a> {
	// The header the extension is currently positioned at.
	header: BlockHeader,
	// Writeable views over the four MMR backends.
	header_pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
	output_pmmr: PMMR<'a, Output, PMMRBackend<Output>>,
	rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
	kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend<TxKernel>>,
	// When true the caller (extending/extending_readonly) discards
	// everything instead of committing.
	rollback: bool,
	/// Db batch used by this extension for index reads/writes.
	pub batch: &'a Batch<'a>,
}
impl<'a> Committed for Extension<'a> {
	/// The extension tracks no input commitments directly.
	fn inputs_committed(&self) -> Vec<Commitment> {
		vec![]
	}

	/// Commitments of every unpruned output leaf in the output MMR.
	fn outputs_committed(&self) -> Vec<Commitment> {
		(1..=self.output_pmmr.unpruned_size())
			.filter(|&pos| pmmr::is_leaf(pos))
			.filter_map(|pos| self.output_pmmr.get_data(pos))
			.map(|out| out.commit)
			.collect()
	}

	/// Excess commitments of every kernel leaf in the kernel MMR.
	fn kernels_committed(&self) -> Vec<Commitment> {
		(1..=self.kernel_pmmr.unpruned_size())
			.filter(|&pos| pmmr::is_leaf(pos))
			.filter_map(|pos| self.kernel_pmmr.get_data(pos))
			.map(|kernel| kernel.excess())
			.collect()
	}
}
impl<'a> Extension<'a> {
	fn new(trees: &'a mut TxHashSet, batch: &'a Batch<'_>, header: BlockHeader) -> Extension<'a> {
		Extension {
			header,
			header_pmmr: PMMR::at(
				&mut trees.header_pmmr_h.backend,
				trees.header_pmmr_h.last_pos,
			),
			output_pmmr: PMMR::at(
				&mut trees.output_pmmr_h.backend,
				trees.output_pmmr_h.last_pos,
			),
			rproof_pmmr: PMMR::at(
				&mut trees.rproof_pmmr_h.backend,
				trees.rproof_pmmr_h.last_pos,
			),
			kernel_pmmr: PMMR::at(
				&mut trees.kernel_pmmr_h.backend,
				trees.kernel_pmmr_h.last_pos,
			),
			rollback: false,
			batch,
		}
	}

	/// Build a readonly view of the UTXO set as of this extension.
	pub fn utxo_view(&'a self) -> UTXOView<'a> {
		UTXOView::new(
			self.output_pmmr.readonly_pmmr(),
			self.header_pmmr.readonly_pmmr(),
			self.batch,
		)
	}

	/// Apply a new block to the extension: header first, then outputs
	/// (recording each output's MMR position in the index), then inputs
	/// (spends), then kernels. Leaves the extension positioned at the
	/// block's header.
	pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
		self.apply_header(&b.header)?;
		for out in b.outputs() {
			let pos = self.apply_output(out)?;
			// Update the output_pos index for the new output.
			self.batch.save_output_pos(&out.commitment(), pos)?;
		}
		for input in b.inputs() {
			self.apply_input(input)?;
		}
		for kernel in b.kernels() {
			self.apply_kernel(kernel)?;
		}
		self.header = b.header.clone();
		Ok(())
	}

	/// Spend an input: verify it references an unspent output (hash at
	/// its indexed position matches) and prune the output and rangeproof
	/// leaves at that position.
	fn apply_input(&mut self, input: &Input) -> Result<(), Error> {
		let commit = input.commitment();
		let pos_res = self.batch.get_output_pos(&commit);
		if let Ok(pos) = pos_res {
			if let Some(hash) = self.output_pmmr.get_hash(pos) {
				// The hash commits to the position, so a stale index entry
				// pointing at some other output cannot match.
				if hash != input.hash_with_index(pos - 1) {
					return Err(
						ErrorKind::TxHashSetErr(format!("output pmmr hash mismatch")).into(),
					);
				}
			}
			// prune() returning Ok(false) means the leaf was already
			// pruned, i.e. the output was already spent.
			match self.output_pmmr.prune(pos) {
				Ok(true) => {
					self.rproof_pmmr
						.prune(pos)
						.map_err(|e| ErrorKind::TxHashSetErr(e))?;
				}
				Ok(false) => return Err(ErrorKind::AlreadySpent(commit).into()),
				Err(e) => return Err(ErrorKind::TxHashSetErr(e).into()),
			}
		} else {
			return Err(ErrorKind::AlreadySpent(commit).into());
		}
		Ok(())
	}

	/// Append a new output, pushing it and its rangeproof onto the
	/// respective MMRs. Returns the new output MMR position.
	fn apply_output(&mut self, out: &Output) -> Result<(u64), Error> {
		let commit = out.commitment();
		// Reject a duplicate commitment if an identical unspent output
		// already exists at the indexed position.
		if let Ok(pos) = self.batch.get_output_pos(&commit) {
			if let Some(out_mmr) = self.output_pmmr.get_data(pos) {
				if out_mmr.commitment() == commit {
					return Err(ErrorKind::DuplicateCommitment(commit).into());
				}
			}
		}
		let output_pos = self
			.output_pmmr
			.push(out)
			.map_err(&ErrorKind::TxHashSetErr)?;
		let rproof_pos = self
			.rproof_pmmr
			.push(&out.proof)
			.map_err(&ErrorKind::TxHashSetErr)?;
		{
			// Sanity check: the output and rangeproof MMRs must stay in
			// lockstep (same size, same position for each leaf).
			if self.output_pmmr.unpruned_size() != self.rproof_pmmr.unpruned_size() {
				return Err(
					ErrorKind::Other(format!("output vs rproof MMRs different sizes")).into(),
				);
			}
			if output_pos != rproof_pos {
				return Err(ErrorKind::Other(format!("output vs rproof MMRs different pos")).into());
			}
		}
		Ok(output_pos)
	}

	/// Push a kernel onto the kernel MMR.
	fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> {
		self.kernel_pmmr
			.push(kernel)
			.map_err(&ErrorKind::TxHashSetErr)?;
		Ok(())
	}

	/// Push a header onto the header MMR.
	fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
		self.header_pmmr
			.push(header)
			.map_err(&ErrorKind::TxHashSetErr)?;
		Ok(())
	}

	/// Hash of the header stored at the given MMR position (if any).
	fn get_header_hash(&self, pos: u64) -> Option<Hash> {
		self.header_pmmr.get_data(pos).map(|x| x.hash())
	}

	/// Get the header at the specified height: look its hash up in the
	/// header MMR, then read the full header from the db batch.
	pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
		// Heights are 0-based while MMR insertions are 1-based, hence +1.
		let pos = pmmr::insertion_to_pmmr_index(height + 1);
		if let Some(hash) = self.get_header_hash(pos) {
			let header = self.batch.get_block_header(&hash)?;
			Ok(header)
		} else {
			Err(ErrorKind::Other(format!("get header by height")).into())
		}
	}

	/// Compare the provided header to the header at the same height in
	/// this extension to determine whether it lies on the current chain.
	pub fn is_on_current_chain(&mut self, header: &BlockHeader) -> Result<(), Error> {
		let chain_header = self.get_header_by_height(header.height)?;
		if chain_header.hash() == header.hash() {
			Ok(())
		} else {
			Err(ErrorKind::Other(format!("not on current chain")).into())
		}
	}

	/// Build a Merkle proof for the given output against the output MMR.
	pub fn merkle_proof(&self, output: &OutputIdentifier) -> Result<MerkleProof, Error> {
		debug!("txhashset: merkle_proof: output: {:?}", output.commit,);
		// Position of the output in the MMR, via the commitment index.
		let pos = self.batch.get_output_pos(&output.commit)?;
		let merkle_proof = self
			.output_pmmr
			.merkle_proof(pos)
			.map_err(&ErrorKind::TxHashSetErr)?;
		Ok(merkle_proof)
	}

	/// Snapshot the output and rangeproof MMR backends at the extension's
	/// current header.
	pub fn snapshot(&mut self) -> Result<(), Error> {
		self.output_pmmr
			.snapshot(&self.header)
			.map_err(|e| ErrorKind::Other(e))?;
		self.rproof_pmmr
			.snapshot(&self.header)
			.map_err(|e| ErrorKind::Other(e))?;
		Ok(())
	}

	/// Rewind the extension to the provided header, un-removing any
	/// output positions spent by the blocks being rewound.
	pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
		debug!("Rewind to header {} at {}", header.hash(), header.height,);
		// Positions spent between the rewind target and our current
		// header, collected from the per-block input bitmaps.
		let rewind_rm_pos = input_pos_to_rewind(header, &self.header, &self.batch)?;
		let header_pos = pmmr::insertion_to_pmmr_index(header.height + 1);
		self.rewind_to_pos(
			header_pos,
			header.output_mmr_size,
			header.kernel_mmr_size,
			&rewind_rm_pos,
		)?;
		self.header = header.clone();
		Ok(())
	}

	/// Rewind each MMR to the given positions. The rangeproof MMR shares
	/// the output MMR's size; the header and kernel MMRs are never pruned
	/// so their removed-position bitmaps are empty.
	fn rewind_to_pos(
		&mut self,
		header_pos: u64,
		output_pos: u64,
		kernel_pos: u64,
		rewind_rm_pos: &Bitmap,
	) -> Result<(), Error> {
		debug!(
			"txhashset: rewind_to_pos: header {}, output {}, kernel {}",
			header_pos, output_pos, kernel_pos,
		);
		self.header_pmmr
			.rewind(header_pos, &Bitmap::create())
			.map_err(&ErrorKind::TxHashSetErr)?;
		self.output_pmmr
			.rewind(output_pos, rewind_rm_pos)
			.map_err(&ErrorKind::TxHashSetErr)?;
		self.rproof_pmmr
			.rewind(output_pos, rewind_rm_pos)
			.map_err(&ErrorKind::TxHashSetErr)?;
		self.kernel_pmmr
			.rewind(kernel_pos, &Bitmap::create())
			.map_err(&ErrorKind::TxHashSetErr)?;
		Ok(())
	}

	/// Current roots of all four MMRs.
	pub fn roots(&self) -> TxHashSetRoots {
		TxHashSetRoots {
			header_root: self.header_pmmr.root(),
			output_root: self.output_pmmr.root(),
			rproof_root: self.rproof_pmmr.root(),
			kernel_root: self.kernel_pmmr.root(),
		}
	}

	/// Root of the header MMR only.
	pub fn header_root(&self) -> Hash {
		self.header_pmmr.root()
	}

	/// Validate the output, rangeproof and kernel MMR roots against the
	/// extension's current header. Skipped for genesis (height 0).
	pub fn validate_roots(&self) -> Result<(), Error> {
		if self.header.height == 0 {
			return Ok(());
		}
		let roots = self.roots();
		if roots.output_root != self.header.output_root
			|| roots.rproof_root != self.header.range_proof_root
			|| roots.kernel_root != self.header.kernel_root
		{
			Err(ErrorKind::InvalidRoot.into())
		} else {
			Ok(())
		}
	}

	/// Validate the header MMR root against the given header's prev_root.
	pub fn validate_header_root(&self, header: &BlockHeader) -> Result<(), Error> {
		if header.height == 0 {
			return Ok(());
		}
		let roots = self.roots();
		if roots.header_root != header.prev_root {
			Err(ErrorKind::InvalidRoot.into())
		} else {
			Ok(())
		}
	}

	/// Validate the MMR sizes against the extension's current header.
	pub fn validate_sizes(&self) -> Result<(), Error> {
		if self.header.height == 0 {
			return Ok(());
		}
		let (header_mmr_size, output_mmr_size, rproof_mmr_size, kernel_mmr_size) = self.sizes();
		// The header MMR holds height + 1 headers (incl. genesis), so its
		// expected size derives from the (height + 2)th insertion point.
		let expected_header_mmr_size = pmmr::insertion_to_pmmr_index(self.header.height + 2) - 1;
		if header_mmr_size != expected_header_mmr_size {
			Err(ErrorKind::InvalidMMRSize.into())
		} else if output_mmr_size != self.header.output_mmr_size {
			Err(ErrorKind::InvalidMMRSize.into())
		} else if kernel_mmr_size != self.header.kernel_mmr_size {
			Err(ErrorKind::InvalidMMRSize.into())
		} else if output_mmr_size != rproof_mmr_size {
			Err(ErrorKind::InvalidMMRSize.into())
		} else {
			Ok(())
		}
	}

	/// Validate internal hash consistency of each of the four MMRs.
	fn validate_mmrs(&self) -> Result<(), Error> {
		let now = Instant::now();
		if let Err(e) = self.header_pmmr.validate() {
			return Err(ErrorKind::InvalidTxHashSet(e).into());
		}
		if let Err(e) = self.output_pmmr.validate() {
			return Err(ErrorKind::InvalidTxHashSet(e).into());
		}
		if let Err(e) = self.rproof_pmmr.validate() {
			return Err(ErrorKind::InvalidTxHashSet(e).into());
		}
		if let Err(e) = self.kernel_pmmr.validate() {
			return Err(ErrorKind::InvalidTxHashSet(e).into());
		}
		debug!(
			"txhashset: validated the header {}, output {}, rproof {}, kernel {} mmrs, took {}s",
			self.header_pmmr.unpruned_size(),
			self.output_pmmr.unpruned_size(),
			self.rproof_pmmr.unpruned_size(),
			self.kernel_pmmr.unpruned_size(),
			now.elapsed().as_secs(),
		);
		Ok(())
	}

	/// Verify the overall kernel sums (via the Committed trait) against
	/// the total overage and total kernel offset of the current header.
	/// Returns (utxo_sum, kernel_sum).
	pub fn validate_kernel_sums(&self) -> Result<((Commitment, Commitment)), Error> {
		let genesis = self.get_header_by_height(0)?;
		let (utxo_sum, kernel_sum) = self.verify_kernel_sums(
			// whether genesis itself carries a kernel (kernel_mmr_size > 0)
			self.header.total_overage(genesis.kernel_mmr_size > 0),
			self.header.total_kernel_offset(),
		)?;
		Ok((utxo_sum, kernel_sum))
	}

	/// Full txhashset validation: MMR integrity, roots, sizes and kernel
	/// sums. Rangeproof and kernel signature verification (the expensive
	/// part) is skipped when `fast_validation` is set.
	pub fn validate(
		&self,
		fast_validation: bool,
		status: &dyn TxHashsetWriteStatus,
	) -> Result<((Commitment, Commitment)), Error> {
		self.validate_mmrs()?;
		self.validate_roots()?;
		self.validate_sizes()?;
		if self.header.height == 0 {
			let zero_commit = secp_static::commit_to_zero_value();
			return Ok((zero_commit.clone(), zero_commit.clone()));
		}
		let (output_sum, kernel_sum) = self.validate_kernel_sums()?;
		if !fast_validation {
			self.verify_rangeproofs(status)?;
			self.verify_kernel_signatures(status)?;
		}
		Ok((output_sum, kernel_sum))
	}

	/// Rebuild the commitment -> position index from the output MMR
	/// leaves (height-0 nodes in postorder are the leaves).
	pub fn rebuild_index(&self) -> Result<(), Error> {
		for n in 1..self.output_pmmr.unpruned_size() + 1 {
			// n is a leaf if bintree_postorder_height(n) == 0
			if pmmr::bintree_postorder_height(n) == 0 {
				if let Some(out) = self.output_pmmr.get_data(n) {
					self.batch.save_output_pos(&out.commit, n)?;
				}
			}
		}
		Ok(())
	}

	/// Force a rollback of this extension when it completes.
	pub fn force_rollback(&mut self) {
		self.rollback = true;
	}

	/// Debug helper: dump the output MMR (reading from file).
	pub fn dump_output_pmmr(&self) {
		debug!("-- outputs --");
		self.output_pmmr.dump_from_file(false);
		debug!("--");
		self.output_pmmr.dump_stats();
		debug!("-- end of outputs --");
	}

	/// Debug helper: dump the MMRs; the short form dumps outputs only.
	pub fn dump(&self, short: bool) {
		debug!("-- outputs --");
		self.output_pmmr.dump(short);
		if !short {
			debug!("-- range proofs --");
			self.rproof_pmmr.dump(short);
			debug!("-- kernels --");
			self.kernel_pmmr.dump(short);
		}
	}

	/// Current sizes of the (header, output, rangeproof, kernel) MMRs.
	pub fn sizes(&self) -> (u64, u64, u64, u64) {
		(
			self.header_pmmr.unpruned_size(),
			self.output_pmmr.unpruned_size(),
			self.rproof_pmmr.unpruned_size(),
			self.kernel_pmmr.unpruned_size(),
		)
	}

	/// Verify every kernel signature in the kernel MMR, reporting
	/// progress to `status` every 20 positions.
	fn verify_kernel_signatures(&self, status: &dyn TxHashsetWriteStatus) -> Result<(), Error> {
		let now = Instant::now();
		let mut kern_count = 0;
		let total_kernels = pmmr::n_leaves(self.kernel_pmmr.unpruned_size());
		for n in 1..self.kernel_pmmr.unpruned_size() + 1 {
			if pmmr::is_leaf(n) {
				if let Some(kernel) = self.kernel_pmmr.get_data(n) {
					kernel.verify()?;
					kern_count += 1;
				}
			}
			if n % 20 == 0 {
				status.on_validation(kern_count, total_kernels, 0, 0);
			}
		}
		debug!(
			"txhashset: verified {} kernel signatures, pmmr size {}, took {}s",
			kern_count,
			self.kernel_pmmr.unpruned_size(),
			now.elapsed().as_secs(),
		);
		Ok(())
	}

	/// Batch-verify all rangeproofs against their output commitments,
	/// flushing each batch of 1000, reporting progress to `status` every
	/// 20 positions.
	fn verify_rangeproofs(&self, status: &dyn TxHashsetWriteStatus) -> Result<(), Error> {
		let now = Instant::now();
		let mut commits: Vec<Commitment> = vec![];
		let mut proofs: Vec<RangeProof> = vec![];
		let mut proof_count = 0;
		let total_rproofs = pmmr::n_leaves(self.output_pmmr.unpruned_size());
		for n in 1..self.output_pmmr.unpruned_size() + 1 {
			if pmmr::is_leaf(n) {
				if let Some(out) = self.output_pmmr.get_data(n) {
					// An output without a matching rangeproof at the same
					// position indicates a corrupt txhashset.
					if let Some(rp) = self.rproof_pmmr.get_data(n) {
						commits.push(out.commit);
						proofs.push(rp);
					} else {
						return Err(ErrorKind::OutputNotFound.into());
					}
					proof_count += 1;
					if proofs.len() >= 1000 {
						Output::batch_verify_proofs(&commits, &proofs)?;
						commits.clear();
						proofs.clear();
						debug!(
							"txhashset: verify_rangeproofs: verified {} rangeproofs",
							proof_count,
						);
					}
				}
			}
			if n % 20 == 0 {
				status.on_validation(0, 0, proof_count, total_rproofs);
			}
		}
		// Verify any remaining proofs in the final partial batch.
		if proofs.len() > 0 {
			Output::batch_verify_proofs(&commits, &proofs)?;
			commits.clear();
			proofs.clear();
			debug!(
				"txhashset: verify_rangeproofs: verified {} rangeproofs",
				proof_count,
			);
		}
		debug!(
			"txhashset: verified {} rangeproofs, pmmr size {}, took {}s",
			proof_count,
			self.rproof_pmmr.unpruned_size(),
			now.elapsed().as_secs(),
		);
		Ok(())
	}
}
/// Package the txhashset data files into a zip and return a file handle
/// to the zip. The txhashset directory is first copied into a temp dir
/// and scrubbed of unexpected files (via `check_and_remove_files`) so
/// the snapshot only contains what is expected for `header`.
pub fn zip_read(root_dir: String, header: &BlockHeader, rand: Option<u32>) -> Result<File, Error> {
	// Use the caller-provided discriminator when given, otherwise derive
	// one from the clock so concurrent callers don't collide on names.
	// (Replaces the old `if let None = rand { .. } else { rand.unwrap() }`
	// anti-pattern with `unwrap_or_else`.)
	let ts = rand.unwrap_or_else(|| {
		SystemTime::now()
			.duration_since(UNIX_EPOCH)
			.unwrap()
			.subsec_micros()
	});
	let txhashset_zip = format!("{}_{}.zip", TXHASHSET_ZIP, ts);
	let txhashset_path = Path::new(&root_dir).join(TXHASHSET_SUBDIR);
	let zip_path = Path::new(&root_dir).join(txhashset_zip);
	{
		// Copy the txhashset to a temp dir, scrub it, then zip the copy.
		let temp_txhashset_path =
			Path::new(&root_dir).join(format!("{}_zip_{}", TXHASHSET_SUBDIR, ts));
		if temp_txhashset_path.exists() {
			fs::remove_dir_all(&temp_txhashset_path)?;
		}
		file::copy_dir_to(&txhashset_path, &temp_txhashset_path)?;
		check_and_remove_files(&temp_txhashset_path, header)?;
		zip::compress(&temp_txhashset_path, &File::create(zip_path.clone())?)
			.map_err(|ze| ErrorKind::Other(ze.to_string()))?;
	}
	let zip_file = File::open(zip_path)?;
	Ok(zip_file)
}
/// Extract a received txhashset zip into the txhashset directory under
/// `root_dir`, then scrub any unexpected files for the given header.
pub fn zip_write(
	root_dir: String,
	txhashset_data: File,
	header: &BlockHeader,
) -> Result<(), Error> {
	let destination = Path::new(&root_dir).join(TXHASHSET_SUBDIR);
	fs::create_dir_all(&destination)?;
	zip::decompress(txhashset_data, &destination)
		.map_err(|ze| ErrorKind::Other(ze.to_string()))?;
	check_and_remove_files(&destination, header)
}
/// Scrub a txhashset directory: remove any subdirectory other than the
/// three expected MMR folders, and within each folder remove any file
/// not in the expected pmmr file set for the given header.
fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Result<(), Error> {
	// First compare the directory structure against the expected layout.
	let subdirectories_expected: HashSet<_> = [OUTPUT_SUBDIR, KERNEL_SUBDIR, RANGE_PROOF_SUBDIR]
		.iter()
		.cloned()
		.map(|s| String::from(s))
		.collect();
	let subdirectories_found: HashSet<_> = fs::read_dir(txhashset_path)?
		.filter_map(|entry| {
			entry.ok().and_then(|e| {
				e.path()
					.file_name()
					.and_then(|n| n.to_str().map(|s| String::from(s)))
			})
		})
		.collect();
	// Remove anything not in the expected set.
	let dir_difference: Vec<String> = subdirectories_found
		.difference(&subdirectories_expected)
		.cloned()
		.collect();
	if !dir_difference.is_empty() {
		debug!("Unexpected folder(s) found in txhashset folder, removing.");
		for diff in dir_difference {
			let diff_path = txhashset_path.join(diff);
			file::delete(diff_path)?;
		}
	}
	// Then build the set of expected pmmr file names. The pmmr_leaf file
	// is snapshotted per block, so its expected name carries the header
	// hash as a suffix.
	let mut pmmr_files_expected: HashSet<_> = PMMR_FILES
		.iter()
		.cloned()
		.map(|s| {
			if s.contains("pmmr_leaf.bin") {
				format!("{}.{}", s, header.hash())
			} else {
				String::from(s)
			}
		})
		.collect();
	// NOTE(review): the literal "..." suffix here looks suspicious - it
	// would only ever match a file literally ending in three dots.
	// Confirm against the backend's snapshot file naming.
	pmmr_files_expected.insert(format!("pmmr_leaf.bin.{}...", header.hash()));
	let subdirectories = fs::read_dir(txhashset_path)?;
	for subdirectory in subdirectories {
		let subdirectory_path = subdirectory?.path();
		let pmmr_files = fs::read_dir(&subdirectory_path)?;
		let pmmr_files_found: HashSet<_> = pmmr_files
			.filter_map(|entry| {
				entry.ok().and_then(|e| {
					e.path()
						.file_name()
						.and_then(|n| n.to_str().map(|s| String::from(s)))
				})
			})
			.collect();
		// Remove any file not in the expected set.
		let difference: Vec<String> = pmmr_files_found
			.difference(&pmmr_files_expected)
			.cloned()
			.collect();
		if !difference.is_empty() {
			debug!(
				"Unexpected file(s) found in txhashset subfolder {:?}, removing.",
				&subdirectory_path
			);
			for diff in difference {
				let diff_path = subdirectory_path.join(diff);
				file::delete(diff_path.clone())?;
				debug!(
					"check_and_remove_files: unexpected file '{:?}' removed",
					diff_path
				);
			}
		}
	}
	Ok(())
}
/// Collect the bitmap of all output MMR positions spent by blocks after
/// `block_header` up to and including `head_header` - i.e. the leaves
/// that must be un-removed when rewinding the output/rangeproof MMRs
/// back to `block_header`.
pub fn input_pos_to_rewind(
	block_header: &BlockHeader,
	head_header: &BlockHeader,
	batch: &Batch<'_>,
) -> Result<Bitmap, Error> {
	if head_header.height < block_header.height {
		debug!(
			"input_pos_to_rewind: {} < {}, nothing to rewind",
			head_header.height, block_header.height
		);
		return Ok(Bitmap::create());
	}

	// Batching the "or" operations cuts the cost of combining many
	// per-block bitmaps: accumulate up to 256, then collapse them with a
	// single fast_or into one bitmap kept as the accumulator's only
	// element. Passing None flushes whatever has accumulated.
	let bitmap_fast_or = |b_res, block_input_bitmaps: &mut Vec<Bitmap>| -> Option<Bitmap> {
		if let Some(b) = b_res {
			block_input_bitmaps.push(b);
			if block_input_bitmaps.len() < 256 {
				return None;
			}
		}
		let bitmap = Bitmap::fast_or(&block_input_bitmaps.iter().collect::<Vec<&Bitmap>>());
		block_input_bitmaps.clear();
		block_input_bitmaps.push(bitmap.clone());
		Some(bitmap)
	};

	// Walk the chain backwards from head to the rewind target, collecting
	// each block's input (spent output positions) bitmap.
	let mut block_input_bitmaps: Vec<Bitmap> = vec![];
	let mut current = head_header.clone();
	while current.hash() != block_header.hash() {
		if current.height < 1 {
			break;
		}
		// A block with no stored input bitmap is skipped (best effort).
		// Fixed: the two calls below previously read "¤t" - mojibake
		// (HTML entity corruption) of "&current".
		if let Ok(b_res) = batch.get_block_input_bitmap(&current.hash()) {
			bitmap_fast_or(Some(b_res), &mut block_input_bitmaps);
		}
		current = batch.get_previous_header(&current)?;
	}

	// Final flush always yields Some, so the unwrap cannot fail.
	let bitmap = bitmap_fast_or(None, &mut block_input_bitmaps).unwrap();
	Ok(bitmap)
}