use alloc::boxed::Box;
use alloc::collections::{BTreeMap, BTreeSet};
use alloc::vec::Vec;
use crate::account::AccountId;
use crate::account::delta::AccountUpdateDetails;
use crate::batch::{
BatchAccountUpdate,
BatchId,
InputOutputNoteTracker,
OrderedBatches,
ProvenBatch,
};
use crate::block::block_inputs::BlockInputs;
use crate::block::{
AccountUpdateWitness,
AccountWitness,
BlockHeader,
BlockNumber,
NullifierWitness,
OutputNoteBatch,
};
use crate::errors::ProposedBlockError;
use crate::note::{NoteId, Nullifier};
use crate::transaction::{InputNoteCommitment, OutputNote, PartialBlockchain, TransactionHeader};
use crate::utils::serde::{
ByteReader,
ByteWriter,
Deserializable,
DeserializationError,
Serializable,
};
use crate::{EMPTY_WORD, MAX_BATCHES_PER_BLOCK, Word};
/// A proposed block: an ordered set of transaction batches together with all data needed
/// to verify and apply the state transition from the previous block.
///
/// Constructed via [`ProposedBlock::new`] or [`ProposedBlock::new_at`], which validate the
/// batches against the provided block inputs.
#[derive(Debug, Clone)]
pub struct ProposedBlock {
    /// The transaction batches aggregated into this block, in their final order.
    batches: OrderedBatches,
    /// The timestamp of the block, in seconds since the UNIX epoch (see `new`).
    timestamp: u32,
    /// One aggregated update witness per updated account, produced by
    /// `AccountUpdateAggregator::into_update_witnesses`.
    account_updated_witnesses: Vec<(AccountId, AccountUpdateWitness)>,
    /// The output notes of each batch, index-aligned with `batches`.
    output_note_batches: Vec<OutputNoteBatch>,
    /// Nullifiers created by this block, each with a witness; at construction time the
    /// witnesses were checked to prove the nullifiers unspent (see `check_nullifiers`).
    created_nullifiers: BTreeMap<Nullifier, NullifierWitness>,
    /// Partial view of the blockchain, validated to be consistent with
    /// `prev_block_header` at construction time.
    partial_blockchain: PartialBlockchain,
    /// Header of the block this proposed block builds on.
    prev_block_header: BlockHeader,
}
impl ProposedBlock {
    /// Creates a new proposed block from the given block inputs, batches, and an explicit
    /// timestamp.
    ///
    /// The batches are validated against the previous block header and the partial
    /// blockchain contained in `block_inputs`; their notes, nullifiers and account updates
    /// are then aggregated into block-level data structures.
    ///
    /// # Errors
    ///
    /// Returns a [`ProposedBlockError`] if:
    /// - more than [`MAX_BATCHES_PER_BLOCK`] batches are provided;
    /// - a batch ID appears more than once;
    /// - `timestamp` does not lie strictly after the previous block's timestamp;
    /// - a batch's expiration block number lies before the block being proposed;
    /// - the partial blockchain's length or peaks are inconsistent with the previous
    ///   block header;
    /// - a batch's reference block is neither the previous block nor contained in the
    ///   partial blockchain;
    /// - an input note remains unauthenticated after note matching;
    /// - a nullifier witness is missing or shows a nullifier as already spent;
    /// - two batches update the same account from the same initial state commitment, or
    ///   an account witness is missing.
    pub fn new_at(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
        timestamp: u32,
    ) -> Result<Self, ProposedBlockError> {
        if batches.len() > MAX_BATCHES_PER_BLOCK {
            return Err(ProposedBlockError::TooManyBatches);
        }

        // Validate the batch set against the previous block header and the chain view.
        // Note: the order of these checks determines which error is reported first.
        check_duplicate_batches(&batches)?;
        check_timestamp_increases_monotonically(timestamp, block_inputs.prev_block_header())?;
        check_batch_expiration(&batches, block_inputs.prev_block_header())?;
        check_reference_block_partial_blockchain_consistency(
            block_inputs.partial_blockchain(),
            block_inputs.prev_block_header(),
        )?;
        check_batch_reference_blocks(
            block_inputs.partial_blockchain(),
            block_inputs.prev_block_header(),
            &batches,
        )?;

        // Match input and output notes across all batches, yielding the surviving input
        // notes, the erased notes, and the surviving output notes keyed by note ID.
        let (block_input_notes, block_erased_notes, block_output_notes) =
            InputOutputNoteTracker::from_batches(
                batches.iter(),
                block_inputs.unauthenticated_note_proofs(),
                block_inputs.partial_blockchain(),
                block_inputs.prev_block_header(),
            )?;

        // Any input note still unauthenticated after note matching is an error.
        if let Some(nullifier) = block_input_notes
            .iter()
            .find_map(|note| (!note.is_authenticated()).then_some(note.nullifier()))
        {
            return Err(ProposedBlockError::UnauthenticatedNoteConsumed { nullifier });
        }

        let (prev_block_header, partial_blockchain, account_witnesses, mut nullifier_witnesses, _) =
            block_inputs.into_parts();

        // Drop nullifier witnesses for erased notes before validating that every
        // remaining input note's nullifier is provably unspent.
        remove_erased_nullifiers(&mut nullifier_witnesses, block_erased_notes.into_iter());
        check_nullifiers(
            &nullifier_witnesses,
            block_input_notes.iter().map(InputNoteCommitment::nullifier),
        )?;

        // Aggregate the per-batch account updates into one update witness per account.
        let aggregator = AccountUpdateAggregator::from_batches(&batches)?;
        let account_updated_witnesses = aggregator.into_update_witnesses(account_witnesses)?;

        // Compute, per batch, the output notes that survived note matching.
        let output_note_batches = compute_block_output_notes(&batches, block_output_notes);

        Ok(Self {
            batches: OrderedBatches::new(batches),
            timestamp,
            account_updated_witnesses,
            output_note_batches,
            created_nullifiers: nullifier_witnesses,
            partial_blockchain,
            prev_block_header,
        })
    }

    /// Creates a new proposed block using the current system time as the timestamp.
    ///
    /// To guarantee that the timestamp strictly increases over the previous block's, the
    /// later of (current time, previous block timestamp + 1) is used.
    ///
    /// # Errors
    ///
    /// Same as [`ProposedBlock::new_at`].
    ///
    /// # Panics
    ///
    /// Panics if the system clock is set before the UNIX epoch or if the current time in
    /// seconds does not fit into a `u32` (i.e. past the year 2106).
    #[cfg(feature = "std")]
    pub fn new(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
    ) -> Result<Self, ProposedBlockError> {
        let timestamp_now: u32 = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("now should be after 1970")
            .as_secs()
            .try_into()
            .expect("timestamp should fit in a u32 before the year 2106");
        let timestamp = timestamp_now.max(block_inputs.prev_block_header().timestamp() + 1);
        Self::new_at(block_inputs, batches, timestamp)
    }

    /// Returns an iterator over all transaction headers in all batches, in batch order.
    pub fn transactions(&self) -> impl Iterator<Item = &TransactionHeader> {
        self.batches
            .as_slice()
            .iter()
            .flat_map(|batch| batch.transactions().as_slice().iter())
    }

    /// Returns the number of the block being proposed, i.e. one more than the length of
    /// the underlying partial blockchain (which equals the previous block's number).
    pub fn block_num(&self) -> BlockNumber {
        self.partial_blockchain().chain_length() + 1
    }

    /// Returns the ordered batches in this block.
    pub fn batches(&self) -> &OrderedBatches {
        &self.batches
    }

    /// Returns the nullifiers created by this block together with their witnesses.
    pub fn created_nullifiers(&self) -> &BTreeMap<Nullifier, NullifierWitness> {
        &self.created_nullifiers
    }

    /// Returns the header of the block this block builds on.
    pub fn prev_block_header(&self) -> &BlockHeader {
        &self.prev_block_header
    }

    /// Returns the partial blockchain this block was validated against.
    pub fn partial_blockchain(&self) -> &PartialBlockchain {
        &self.partial_blockchain
    }

    /// Returns the aggregated update witness for each updated account.
    pub fn updated_accounts(&self) -> &[(AccountId, AccountUpdateWitness)] {
        &self.account_updated_witnesses
    }

    /// Returns the block's timestamp, in seconds since the UNIX epoch.
    pub fn timestamp(&self) -> u32 {
        self.timestamp
    }

    /// Returns the output notes of each batch, index-aligned with the batches.
    pub fn output_note_batches(&self) -> &[OutputNoteBatch] {
        &self.output_note_batches
    }

    /// Consumes the proposed block and returns its constituent parts.
    #[allow(clippy::type_complexity)]
    pub fn into_parts(
        self,
    ) -> (
        OrderedBatches,
        Vec<(AccountId, AccountUpdateWitness)>,
        Vec<OutputNoteBatch>,
        BTreeMap<Nullifier, NullifierWitness>,
        PartialBlockchain,
        BlockHeader,
    ) {
        (
            self.batches,
            self.account_updated_witnesses,
            self.output_note_batches,
            self.created_nullifiers,
            self.partial_blockchain,
            self.prev_block_header,
        )
    }
}
impl Serializable for ProposedBlock {
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        // Destructure so that adding a field without serializing it becomes a compile
        // error. The write order must stay in sync with `Deserializable::read_from`.
        let Self {
            batches,
            timestamp,
            account_updated_witnesses,
            output_note_batches,
            created_nullifiers,
            partial_blockchain,
            prev_block_header,
        } = self;

        batches.write_into(target);
        timestamp.write_into(target);
        account_updated_witnesses.write_into(target);
        output_note_batches.write_into(target);
        created_nullifiers.write_into(target);
        partial_blockchain.write_into(target);
        prev_block_header.write_into(target);
    }
}
impl Deserializable for ProposedBlock {
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        // Fields are read in the exact order `Serializable::write_into` wrote them.
        let batches = OrderedBatches::read_from(source)?;
        let timestamp = u32::read_from(source)?;
        let account_updated_witnesses =
            Vec::<(AccountId, AccountUpdateWitness)>::read_from(source)?;
        let output_note_batches = Vec::<OutputNoteBatch>::read_from(source)?;
        let created_nullifiers = BTreeMap::<Nullifier, NullifierWitness>::read_from(source)?;
        let partial_blockchain = PartialBlockchain::read_from(source)?;
        let prev_block_header = BlockHeader::read_from(source)?;

        Ok(Self {
            batches,
            timestamp,
            account_updated_witnesses,
            output_note_batches,
            created_nullifiers,
            partial_blockchain,
            prev_block_header,
        })
    }
}
/// Checks that every batch in the given set has a unique batch ID.
///
/// # Errors
///
/// Returns [`ProposedBlockError::DuplicateBatch`] for the first batch whose ID has
/// already been seen.
fn check_duplicate_batches(batches: &[ProvenBatch]) -> Result<(), ProposedBlockError> {
    // Renamed from the misleading `input_note_set`: this set tracks batch IDs.
    let mut seen_batch_ids = BTreeSet::new();
    for batch in batches {
        // `insert` returns false when the ID was already present, i.e. a duplicate.
        if !seen_batch_ids.insert(batch.id()) {
            return Err(ProposedBlockError::DuplicateBatch { batch_id: batch.id() });
        }
    }
    Ok(())
}
/// Checks that the provided timestamp lies strictly after the previous block's timestamp.
fn check_timestamp_increases_monotonically(
    provided_timestamp: u32,
    prev_block_header: &BlockHeader,
) -> Result<(), ProposedBlockError> {
    let previous_timestamp = prev_block_header.timestamp();
    if provided_timestamp > previous_timestamp {
        Ok(())
    } else {
        Err(ProposedBlockError::TimestampDoesNotIncreaseMonotonically {
            provided_timestamp,
            previous_timestamp,
        })
    }
}
/// Checks that no batch has expired relative to the block number being proposed
/// (the previous block's number plus one).
fn check_batch_expiration(
    batches: &[ProvenBatch],
    prev_block_header: &BlockHeader,
) -> Result<(), ProposedBlockError> {
    let current_block_num = prev_block_header.block_num() + 1;

    // Report the first batch whose expiration block lies before the proposed block.
    match batches
        .iter()
        .find(|batch| batch.batch_expiration_block_num() < current_block_num)
    {
        Some(expired_batch) => Err(ProposedBlockError::ExpiredBatch {
            batch_id: expired_batch.id(),
            batch_expiration_block_num: expired_batch.batch_expiration_block_num(),
            current_block_num,
        }),
        None => Ok(()),
    }
}
/// Checks that every given nullifier has a witness whose proof shows the nullifier as
/// unspent (i.e. its value in the nullifier tree is the empty word).
fn check_nullifiers(
    nullifier_witnesses: &BTreeMap<Nullifier, NullifierWitness>,
    block_input_notes: impl Iterator<Item = Nullifier>,
) -> Result<(), ProposedBlockError> {
    for nullifier in block_input_notes {
        // Look up the witness and extract the proven value for this nullifier's key.
        let proven_value = nullifier_witnesses
            .get(&nullifier)
            .and_then(|witness| witness.proof().get(&nullifier.as_word()));

        let Some(value) = proven_value else {
            return Err(ProposedBlockError::NullifierProofMissing(nullifier));
        };

        // A non-empty value means the nullifier was already recorded, i.e. spent.
        if value != EMPTY_WORD {
            return Err(ProposedBlockError::NullifierSpent(nullifier));
        }
    }
    Ok(())
}
/// Removes the nullifier witnesses of erased notes from the witness map.
///
/// Absent entries are silently ignored — the removal result is discarded.
fn remove_erased_nullifiers(
    nullifier_witnesses: &mut BTreeMap<Nullifier, NullifierWitness>,
    block_erased_notes: impl Iterator<Item = Nullifier>,
) {
    block_erased_notes.for_each(|erased_note| {
        let _ = nullifier_witnesses.remove(&erased_note);
    });
}
/// Checks that the partial blockchain is consistent with the previous block header:
/// its length must equal the previous block's number, and the hash of its peaks must
/// equal the previous block's chain commitment.
fn check_reference_block_partial_blockchain_consistency(
    partial_blockchain: &PartialBlockchain,
    prev_block_header: &BlockHeader,
) -> Result<(), ProposedBlockError> {
    let chain_length = partial_blockchain.chain_length();
    let prev_block_num = prev_block_header.block_num();

    if chain_length != prev_block_num {
        return Err(ProposedBlockError::ChainLengthNotEqualToPreviousBlockNumber {
            chain_length,
            prev_block_num,
        });
    }

    let chain_commitment = partial_blockchain.peaks().hash_peaks();
    let prev_block_chain_commitment = prev_block_header.chain_commitment();

    if chain_commitment != prev_block_chain_commitment {
        return Err(ProposedBlockError::ChainRootNotEqualToPreviousBlockChainCommitment {
            chain_commitment,
            prev_block_chain_commitment,
            prev_block_num,
        });
    }

    Ok(())
}
/// Checks that each batch's reference block is either the previous block itself or a
/// block contained in the partial blockchain.
///
/// # Errors
///
/// Returns [`ProposedBlockError::BatchReferenceBlockMissingFromChain`] for the first
/// batch whose reference block is found in neither place.
fn check_batch_reference_blocks(
    partial_blockchain: &PartialBlockchain,
    prev_block_header: &BlockHeader,
    batches: &[ProvenBatch],
) -> Result<(), ProposedBlockError> {
    for batch in batches {
        // Use the bound value consistently rather than re-calling the accessor
        // (the original bound it but then called `batch.reference_block_num()` again).
        let batch_reference_block_num = batch.reference_block_num();
        if batch_reference_block_num != prev_block_header.block_num()
            && !partial_blockchain.contains_block(batch_reference_block_num)
        {
            return Err(ProposedBlockError::BatchReferenceBlockMissingFromChain {
                reference_block_num: batch_reference_block_num,
                batch_id: batch.id(),
            });
        }
    }
    Ok(())
}
/// Builds the per-batch output note batches for the block, in batch order, by draining
/// each batch's surviving output notes from `block_output_notes`.
fn compute_block_output_notes(
    batches: &[ProvenBatch],
    mut block_output_notes: BTreeMap<NoteId, (BatchId, OutputNote)>,
) -> Vec<OutputNoteBatch> {
    batches
        .iter()
        .map(|batch| compute_batch_output_notes(batch, &mut block_output_notes))
        .collect()
}
/// Collects the output notes of a single batch that are still present in
/// `block_output_notes`, pairing each with its index within the batch's output notes.
///
/// Notes absent from the map are skipped; present notes are removed from the map as
/// they are consumed.
fn compute_batch_output_notes(
    batch: &ProvenBatch,
    block_output_notes: &mut BTreeMap<NoteId, (BatchId, OutputNote)>,
) -> OutputNoteBatch {
    let mut surviving_notes = Vec::with_capacity(batch.output_notes().len());

    for (note_idx, original_output_note) in batch.output_notes().iter().enumerate() {
        // Skip notes no longer present in the map.
        let Some((_batch_id, output_note)) = block_output_notes.remove(&original_output_note.id())
        else {
            continue;
        };

        debug_assert_eq!(
            _batch_id,
            batch.id(),
            "batch that contained the note originally is no longer the batch that contains it according to the provided map"
        );

        surviving_notes.push((note_idx, output_note));
    }

    surviving_notes
}
/// Aggregates the account updates of a set of batches into one update per account.
struct AccountUpdateAggregator {
    /// For each account, maps the initial state commitment of an update to that update
    /// and the ID of the batch it came from. Keying by initial commitment lets
    /// `aggregate_account` chain the updates into a single state transition.
    updates: BTreeMap<AccountId, BTreeMap<Word, (BatchAccountUpdate, BatchId)>>,
}
impl AccountUpdateAggregator {
    /// Creates a new, empty aggregator.
    fn new() -> Self {
        Self { updates: BTreeMap::new() }
    }

    /// Collects all account updates from the given batches into a new aggregator.
    ///
    /// # Errors
    ///
    /// Returns an error if two batches update the same account from the same initial
    /// state commitment.
    fn from_batches(batches: &[ProvenBatch]) -> Result<Self, ProposedBlockError> {
        let mut update_aggregator = AccountUpdateAggregator::new();
        for batch in batches {
            for (account_id, update) in batch.account_updates() {
                update_aggregator.insert_update(*account_id, batch.id(), update.clone())?;
            }
        }
        Ok(update_aggregator)
    }

    /// Inserts one account update from one batch, keyed by its initial state commitment.
    ///
    /// No-op updates (initial commitment equal to final commitment) are skipped, which
    /// also guarantees that every per-account map in `self.updates` is non-empty — an
    /// invariant `aggregate_account` relies on.
    ///
    /// # Errors
    ///
    /// Returns [`ProposedBlockError::ConflictingBatchesUpdateSameAccount`] if another
    /// batch already updated this account from the same initial state commitment.
    fn insert_update(
        &mut self,
        account_id: AccountId,
        batch_id: BatchId,
        update: BatchAccountUpdate,
    ) -> Result<(), ProposedBlockError> {
        // Skip updates that do not change the account state.
        // (Also removed a stray empty statement `;` after this block.)
        if update.initial_state_commitment() == update.final_state_commitment() {
            return Ok(());
        }

        if let Some((conflicting_update, conflicting_batch_id)) = self
            .updates
            .entry(account_id)
            .or_default()
            .insert(update.initial_state_commitment(), (update, batch_id))
        {
            return Err(ProposedBlockError::ConflictingBatchesUpdateSameAccount {
                account_id,
                initial_state_commitment: conflicting_update.initial_state_commitment(),
                first_batch_id: conflicting_batch_id,
                second_batch_id: batch_id,
            });
        }
        Ok(())
    }

    /// Consumes the aggregator and pairs each account's updates with its witness,
    /// producing one aggregated [`AccountUpdateWitness`] per updated account.
    ///
    /// # Errors
    ///
    /// Returns [`ProposedBlockError::MissingAccountWitness`] if no witness was provided
    /// for an updated account, or propagates errors from [`Self::aggregate_account`].
    fn into_update_witnesses(
        self,
        mut account_witnesses: BTreeMap<AccountId, AccountWitness>,
    ) -> Result<Vec<(AccountId, AccountUpdateWitness)>, ProposedBlockError> {
        let mut account_update_witnesses = Vec::with_capacity(self.updates.len());
        for (account_id, updates_map) in self.updates {
            let witness = account_witnesses
                .remove(&account_id)
                .ok_or(ProposedBlockError::MissingAccountWitness(account_id))?;
            let account_update_witness = Self::aggregate_account(account_id, witness, updates_map)?;
            account_update_witnesses.push((account_id, account_update_witness));
        }
        Ok(account_update_witnesses)
    }

    /// Chains all updates of a single account into one aggregated update witness,
    /// starting from the state proven by `initial_state_proof` and following each
    /// update's final commitment to the next update's initial commitment.
    ///
    /// # Errors
    ///
    /// Returns an error if the updates do not form a single contiguous chain of state
    /// transitions, or if merging two update details fails.
    fn aggregate_account(
        account_id: AccountId,
        initial_state_proof: AccountWitness,
        mut updates: BTreeMap<Word, (BatchAccountUpdate, BatchId)>,
    ) -> Result<AccountUpdateWitness, ProposedBlockError> {
        // If the witness attests to a different account ID, start from the empty word
        // (presumably the account does not exist in the tree yet — confirm against
        // `AccountWitness` semantics).
        let initial_state_commitment = if account_id == initial_state_proof.id() {
            initial_state_proof.state_commitment()
        } else {
            Word::empty()
        };
        let mut details: Option<AccountUpdateDetails> = None;

        // Walk the chain: each step removes the update whose initial commitment matches
        // the current state and advances to that update's final commitment.
        let mut current_commitment = initial_state_commitment;
        while !updates.is_empty() {
            // Fixed mojibake from the original source: `.remove(¤t_commitment)`
            // was a corrupted `.remove(&current_commitment)`.
            let (update, _) = updates.remove(&current_commitment).ok_or_else(|| {
                ProposedBlockError::InconsistentAccountStateTransition {
                    account_id,
                    state_commitment: current_commitment,
                    remaining_state_commitments: updates.keys().copied().collect(),
                }
            })?;
            current_commitment = update.final_state_commitment();
            let update_details = update.into_update();

            // Merge this update's details into the accumulated details.
            details = Some(match details {
                None => update_details,
                Some(details) => details.merge(update_details).map_err(|source| {
                    ProposedBlockError::AccountUpdateError { account_id, source: Box::new(source) }
                })?,
            });
        }

        Ok(AccountUpdateWitness::new(
            initial_state_commitment,
            current_commitment,
            initial_state_proof,
            details.expect("details should be Some as updates is guaranteed to not be empty"),
        ))
    }
}