miden_protocol/block/proposed_block.rs
use alloc::boxed::Box;
use alloc::collections::{BTreeMap, BTreeSet};
use alloc::vec::Vec;

use crate::account::AccountId;
use crate::account::delta::AccountUpdateDetails;
use crate::batch::{
    BatchAccountUpdate,
    BatchId,
    InputOutputNoteTracker,
    OrderedBatches,
    ProvenBatch,
};
use crate::block::account_tree::{AccountWitness, PartialAccountTree};
use crate::block::block_inputs::BlockInputs;
use crate::block::nullifier_tree::{NullifierWitness, PartialNullifierTree};
use crate::block::{
    AccountUpdateWitness,
    BlockBody,
    BlockHeader,
    BlockNoteIndex,
    BlockNoteTree,
    BlockNumber,
    OutputNoteBatch,
};
use crate::errors::ProposedBlockError;
use crate::note::{NoteId, Nullifier};
use crate::transaction::{
    InputNoteCommitment,
    OutputNote,
    PartialBlockchain,
    TransactionHeader,
    TransactionKernel,
};
use crate::utils::serde::{
    ByteReader,
    ByteWriter,
    Deserializable,
    DeserializationError,
    Serializable,
};
use crate::{EMPTY_WORD, MAX_BATCHES_PER_BLOCK, Word};

// PROPOSED BLOCK
// ================================================================================================

/// A proposed block with many, but not all, of the constraints of a
/// [`ProvenBlock`](crate::block::ProvenBlock) enforced.
///
/// See [`ProposedBlock::new_at`] for details on the checks.
#[derive(Debug, Clone)]
pub struct ProposedBlock {
    /// The transaction batches in this block.
    batches: OrderedBatches,
    /// The unix timestamp of the block in seconds.
    timestamp: u32,
    /// The [`AccountUpdateWitness`] for each account that was updated in this block. See its docs
    /// for details.
    account_updated_witnesses: Vec<(AccountId, AccountUpdateWitness)>,
    /// Note batches created by the transactions in this block.
    ///
    /// These are the output notes after note erasure has been done, so they represent the actual
    /// output notes of the block.
    ///
    /// The length of this vector is guaranteed to be equal to the length of `batches` and the
    /// inner batch of output notes may be empty if a batch did not create any notes.
    output_note_batches: Vec<OutputNoteBatch>,
    /// The nullifiers created by this block.
    ///
    /// These are the nullifiers of all input notes after note erasure has been done, so these are
    /// the nullifiers of all _authenticated_ notes consumed in the block.
    created_nullifiers: BTreeMap<Nullifier, NullifierWitness>,
    /// The [`PartialBlockchain`] at the state of the previous block header. It is used to:
    /// - authenticate unauthenticated notes whose note inclusion proof references a block.
    /// - authenticate all reference blocks of the batches in this block.
    partial_blockchain: PartialBlockchain,
    /// The previous block's header which this block builds on top of.
    ///
    /// As part of proving the block, this header will be added to the next partial blockchain.
    prev_block_header: BlockHeader,
}

impl ProposedBlock {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------

    /// Creates a new proposed block from the provided [`BlockInputs`], transaction batches and
    /// timestamp.
    ///
    /// This checks most of the constraints of a block and computes most of the data structure
    /// updates except for the more expensive tree updates (nullifier, account and chain
    /// commitment).
    ///
    /// # Errors
    ///
    /// Returns an error if any of the following conditions are met.
    ///
    /// ## Batches
    ///
    /// - The number of batches exceeds [`MAX_BATCHES_PER_BLOCK`].
    /// - There are duplicate batches, i.e. they have the same [`BatchId`].
    /// - The expiration block number of any batch is less than the block number of the currently
    ///   proposed block.
    ///
    /// ## Chain
    ///
    /// - The length of the [`PartialBlockchain`] in the block inputs is not equal to the block
    ///   number of the previous block header in the block inputs.
    /// - The [`PartialBlockchain`]'s chain commitment is not equal to the
    ///   [`BlockHeader::chain_commitment`] of the previous block header.
    ///
    /// ## Notes
    ///
    /// Note that, in the following, the set of authenticated notes includes unauthenticated notes
    /// that have been authenticated.
    ///
    /// - The union of all input notes across all batches contains duplicates.
    /// - The union of all output notes across all batches contains duplicates.
    /// - There is an unauthenticated input note and an output note with the same note ID but their
    ///   note commitments are different (i.e. their metadata is different).
    /// - There is a note inclusion proof for an unauthenticated note whose referenced block is not
    ///   in the [`PartialBlockchain`].
    /// - The note inclusion proof for an unauthenticated note is invalid.
    /// - There are any unauthenticated notes for which no note inclusion proof is provided.
    /// - A [`NullifierWitness`] is missing for an authenticated note.
    /// - The [`NullifierWitness`] for an authenticated note proves that the note was already
    ///   consumed.
    ///
    /// ## Accounts
    ///
    /// - An [`AccountWitness`] is missing for an account updated by a batch.
    /// - Any two batches update the same account from the same state. For example, if batch 1
    ///   updates some account from state A to B and batch 2 updates it from A to F, then those
    ///   batches conflict as they both start from the same initial state but produce a fork in the
    ///   account's state.
    /// - Account updates from different batches cannot be brought into a contiguous order. For
    ///   example, if batch 1 updates an account from state A to C, and batch 2 updates it from
    ///   D to F, then the state transition from C to D is missing. Note that this does not mean
    ///   that batches must be provided in an order where account updates chain together in the
    ///   order of the batches, which would generally be an impossible requirement to fulfill.
    /// - Account updates cannot be merged, i.e. [`AccountUpdateDetails::merge`] fails on the
    ///   updates from two batches.
    ///
    /// ## Time
    ///
    /// - The given `timestamp` does not increase monotonically compared to the previous block
    ///   header's timestamp.
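    ///
    /// # Example
    ///
    /// A minimal call sketch (not compiled here). It assumes that `block_inputs` was fetched from
    /// a node for exactly the accounts, nullifiers and unauthenticated notes referenced by the
    /// proven `batches`, and that `timestamp` is greater than the previous block header's
    /// timestamp:
    ///
    /// ```ignore
    /// let prev_block_num = block_inputs.prev_block_header().block_num();
    /// let block = ProposedBlock::new_at(block_inputs, batches, timestamp)?;
    /// assert_eq!(block.block_num(), prev_block_num + 1);
    /// ```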
    pub fn new_at(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
        timestamp: u32,
    ) -> Result<Self, ProposedBlockError> {
        // Check for duplicate and max number of batches.
        // ----------------------------------------------------------------------------------------

        if batches.len() > MAX_BATCHES_PER_BLOCK {
            return Err(ProposedBlockError::TooManyBatches);
        }

        check_duplicate_batches(&batches)?;

        // Check timestamp increases monotonically.
        // ----------------------------------------------------------------------------------------

        check_timestamp_increases_monotonically(timestamp, block_inputs.prev_block_header())?;

        // Check for batch expiration.
        // ----------------------------------------------------------------------------------------

        check_batch_expiration(&batches, block_inputs.prev_block_header())?;

        // Check for consistency between the partial blockchain and the referenced previous block.
        // ----------------------------------------------------------------------------------------

        check_reference_block_partial_blockchain_consistency(
            block_inputs.partial_blockchain(),
            block_inputs.prev_block_header(),
        )?;

        // Check every block referenced by a batch is in the partial blockchain.
        // ----------------------------------------------------------------------------------------

        check_batch_reference_blocks(
            block_inputs.partial_blockchain(),
            block_inputs.prev_block_header(),
            &batches,
        )?;

        // Check for duplicates in the input and output notes and compute the input and output
        // notes of the block by erasing notes that are created and consumed within this block as
        // well as authenticating unauthenticated notes.
        // ----------------------------------------------------------------------------------------

        let (block_input_notes, block_erased_notes, block_output_notes) =
            InputOutputNoteTracker::from_batches(
                batches.iter(),
                block_inputs.unauthenticated_note_proofs(),
                block_inputs.partial_blockchain(),
                block_inputs.prev_block_header(),
            )?;

        // All unauthenticated notes must be erased or authenticated by now.
        if let Some(nullifier) = block_input_notes
            .iter()
            .find_map(|note| (!note.is_authenticated()).then_some(note.nullifier()))
        {
            return Err(ProposedBlockError::UnauthenticatedNoteConsumed { nullifier });
        }

        // Check for nullifier proofs and unspent nullifiers.
        // ----------------------------------------------------------------------------------------

        let (prev_block_header, partial_blockchain, account_witnesses, mut nullifier_witnesses, _) =
            block_inputs.into_parts();

        // Remove nullifiers of erased notes, so we only add the nullifiers of actual input notes
        // to the proposed block.
        remove_erased_nullifiers(&mut nullifier_witnesses, block_erased_notes.into_iter());

        // Check against the computed block_input_notes which also contain unauthenticated notes
        // that have been authenticated.
        check_nullifiers(
            &nullifier_witnesses,
            block_input_notes.iter().map(InputNoteCommitment::nullifier),
        )?;

        // Aggregate account updates across batches.
        // ----------------------------------------------------------------------------------------

        let aggregator = AccountUpdateAggregator::from_batches(&batches)?;
        let account_updated_witnesses = aggregator.into_update_witnesses(account_witnesses)?;

        // Compute the block's output note batches from the individual batch output notes.
        // ----------------------------------------------------------------------------------------

        let output_note_batches = compute_block_output_notes(&batches, block_output_notes);

        // Build the proposed block from its parts.
        // ----------------------------------------------------------------------------------------

        Ok(Self {
            batches: OrderedBatches::new(batches),
            timestamp,
            account_updated_witnesses,
            output_note_batches,
            created_nullifiers: nullifier_witnesses,
            partial_blockchain,
            prev_block_header,
        })
    }

    /// Creates a new proposed block from the provided [`BlockInputs`] and transaction batches.
    ///
    /// Equivalent to [`ProposedBlock::new_at`] except that the timestamp of the proposed block is
    /// set to the current system time or the previous block header's timestamp + 1, whichever
    /// is greater. This guarantees that the timestamp increases monotonically.
    ///
    /// See [`ProposedBlock::new_at`] for details on errors and other constraints.
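    ///
    /// # Example
    ///
    /// The timestamp selection itself can be illustrated with plain integers (the concrete values
    /// below are made up for the sketch):
    ///
    /// ```
    /// let prev_timestamp: u32 = 1_700_000_000;
    /// // The local clock may lag behind the previous block, e.g. due to clock drift.
    /// let now: u32 = 1_699_999_999;
    /// let timestamp = now.max(prev_timestamp + 1);
    /// assert_eq!(timestamp, prev_timestamp + 1);
    /// ```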
    #[cfg(feature = "std")]
    pub fn new(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
    ) -> Result<Self, ProposedBlockError> {
        let timestamp_now: u32 = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("now should be after 1970")
            .as_secs()
            .try_into()
            .expect("timestamp should fit in a u32 before the year 2106");

        let timestamp = timestamp_now.max(block_inputs.prev_block_header().timestamp() + 1);

        Self::new_at(block_inputs, batches, timestamp)
    }

    // ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns the block number of this proposed block.
    pub fn block_num(&self) -> BlockNumber {
        // The chain length is the length at the state of the previous block header, so we have to
        // add one.
        self.partial_blockchain().chain_length() + 1
    }

    /// Returns a reference to the previous block header that this block builds on top of.
    pub fn prev_block_header(&self) -> &BlockHeader {
        &self.prev_block_header
    }

    /// Returns the [`PartialBlockchain`] that this block contains.
    pub fn partial_blockchain(&self) -> &PartialBlockchain {
        &self.partial_blockchain
    }

    /// Returns a reference to the ordered transaction batches in this block.
    pub fn batches(&self) -> &OrderedBatches {
        &self.batches
    }

    /// Returns an iterator over all transactions in the block.
    pub fn transactions(&self) -> impl Iterator<Item = &TransactionHeader> {
        self.batches
            .as_slice()
            .iter()
            .flat_map(|batch| batch.transactions().as_slice().iter())
    }

    /// Returns the map of nullifiers to their proofs from the proposed block.
    pub fn created_nullifiers(&self) -> &BTreeMap<Nullifier, NullifierWitness> {
        &self.created_nullifiers
    }

    /// Returns a reference to the slice of accounts updated in this block.
    pub fn updated_accounts(&self) -> &[(AccountId, AccountUpdateWitness)] {
        &self.account_updated_witnesses
    }

    /// Returns a slice of the [`OutputNoteBatch`] of each batch in this block.
    pub fn output_note_batches(&self) -> &[OutputNoteBatch] {
        &self.output_note_batches
    }

    /// Returns the timestamp of this block.
    pub fn timestamp(&self) -> u32 {
        self.timestamp
    }

    // COMMITMENT COMPUTATIONS
    // --------------------------------------------------------------------------------------------

    /// Computes the new account tree root after the given updates.
    pub fn compute_account_root(&self) -> Result<Word, ProposedBlockError> {
        // If no accounts were updated, the account tree root is unchanged.
        if self.account_updated_witnesses.is_empty() {
            return Ok(self.prev_block_header.account_root());
        }

        // First reconstruct the current account tree from the provided merkle paths.
        // If a witness points to a leaf where multiple account IDs share the same prefix, this
        // will return an error.
        let mut partial_account_tree = PartialAccountTree::with_witnesses(
            self.account_updated_witnesses
                .iter()
                .map(|(_, update_witness)| update_witness.to_witness()),
        )
        .map_err(|source| ProposedBlockError::AccountWitnessTracking { source })?;

        // Check the account tree root in the previous block header matches the reconstructed
        // tree's root.
        if self.prev_block_header.account_root() != partial_account_tree.root() {
            return Err(ProposedBlockError::StaleAccountTreeRoot {
                prev_block_account_root: self.prev_block_header.account_root(),
                stale_account_root: partial_account_tree.root(),
            });
        }

        // Second, update the account tree by inserting the new final account state commitments to
        // compute the new root of the account tree.
        // If an account ID's prefix already exists in the tree, this will return an error.
        // Note that we have inserted all witnesses that we want to update into the partial account
        // tree, so we should not run into the untracked key error.
        partial_account_tree
            .upsert_state_commitments(self.account_updated_witnesses.iter().map(
                |(account_id, update_witness)| {
                    (*account_id, update_witness.final_state_commitment())
                },
            ))
            .map_err(|source| ProposedBlockError::AccountIdPrefixDuplicate { source })?;

        Ok(partial_account_tree.root())
    }

    /// Computes the new nullifier root by inserting the nullifier witnesses into a partial
    /// nullifier tree and marking each nullifier as spent at this block's block number.
    pub fn compute_nullifier_root(&self) -> Result<Word, ProposedBlockError> {
        // If no nullifiers were created, the nullifier tree root is unchanged.
        if self.created_nullifiers.is_empty() {
            return Ok(self.prev_block_header.nullifier_root());
        }

        // First, reconstruct the current nullifier tree with the merkle paths of the nullifiers we
        // want to update.
        // Due to the guarantees of ProposedBlock we can safely assume that each nullifier is
        // mapped to its corresponding nullifier witness, so we don't have to check again whether
        // they match.
        let mut partial_nullifier_tree =
            PartialNullifierTree::with_witnesses(self.created_nullifiers().values().cloned())
                .map_err(ProposedBlockError::NullifierWitnessRootMismatch)?;

        // Check the nullifier tree root in the previous block header matches the reconstructed
        // tree's root.
        if self.prev_block_header.nullifier_root() != partial_nullifier_tree.root() {
            return Err(ProposedBlockError::StaleNullifierTreeRoot {
                prev_block_nullifier_root: self.prev_block_header.nullifier_root(),
                stale_nullifier_root: partial_nullifier_tree.root(),
            });
        }

        // Second, mark each nullifier as spent in the tree. Note that each nullifier was already
        // verified to be unspent as part of constructing the proposed block.

        // SAFETY: As mentioned above, we can safely assume that each nullifier's witness was
        // added and every nullifier should be tracked by the partial tree and
        // therefore updatable.
        partial_nullifier_tree
            .mark_spent_all(self.created_nullifiers.keys().copied(), self.block_num())
            .expect("nullifiers' merkle path should have been added to the partial tree and the nullifiers should be unspent");

        Ok(partial_nullifier_tree.root())
    }

    /// Computes the block note tree from the output note batches.
    pub fn compute_block_note_tree(&self) -> BlockNoteTree {
        let output_notes_iter =
            self.output_note_batches.iter().enumerate().flat_map(|(batch_idx, notes)| {
                notes.iter().map(move |(note_idx_in_batch, note)| {
                    (
                        // SAFETY: The proposed block contains at most the max allowed number of
                        // batches and each batch is guaranteed to contain at most
                        // the max allowed number of output notes.
                        BlockNoteIndex::new(batch_idx, *note_idx_in_batch).expect(
                            "max batches in block and max notes in batches should be enforced",
                        ),
                        note.id(),
                        note.metadata(),
                    )
                })
            });

        // SAFETY: We only construct proposed blocks that:
        // - do not contain duplicates
        // - contain at most the max allowed number of batches and each batch is guaranteed to
        //   contain at most the max allowed number of output notes.
        BlockNoteTree::with_entries(output_notes_iter)
            .expect("the output notes of the block should not contain duplicates and contain at most the allowed maximum")
    }

    /// Adds the commitment of the previous block header to the partial blockchain to compute the
    /// new chain commitment.
    pub fn compute_chain_commitment(&self) -> Word {
        let mut partial_blockchain = self.partial_blockchain.clone();
        // SAFETY: This does not panic as long as the block header we're adding is the next one in
        // the chain which is validated as part of constructing a `ProposedBlock`.
        partial_blockchain.add_block(&self.prev_block_header, true);
        partial_blockchain.peaks().hash_peaks()
    }

    // STATE MUTATORS
    // --------------------------------------------------------------------------------------------

    /// Builds a [`BlockHeader`] and [`BlockBody`] by computing the following from the state
    /// updates encapsulated by this [`ProposedBlock`]:
    /// - the account root;
    /// - the nullifier root;
    /// - the note root;
    /// - the transaction commitment; and
    /// - the chain commitment.
    ///
    /// The returned block header contains the same validator public key as the previous block, as
    /// provided by the proposed block.
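    ///
    /// # Example
    ///
    /// A usage sketch (not compiled here), assuming `proposed_block` was previously constructed
    /// via [`ProposedBlock::new_at`]:
    ///
    /// ```ignore
    /// let block_num = proposed_block.block_num();
    /// let (header, body) = proposed_block.into_header_and_body()?;
    /// assert_eq!(header.block_num(), block_num);
    /// ```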
    pub fn into_header_and_body(self) -> Result<(BlockHeader, BlockBody), ProposedBlockError> {
        // Get fields from the proposed block before it is consumed.
        let block_num = self.block_num();
        let timestamp = self.timestamp();
        let prev_block_header = self.prev_block_header().clone();

        // Insert the state commitments of updated accounts into the account tree to compute its
        // new root.
        let new_account_root = self.compute_account_root()?;

        // Insert the created nullifiers into the nullifier tree to compute its new root.
        let new_nullifier_root = self.compute_nullifier_root()?;

        // Compute the root of the block note tree.
        let note_tree = self.compute_block_note_tree();
        let note_root = note_tree.root();

        // Insert the previous block header into the block partial blockchain to get the new chain
        // commitment.
        // TODO: Consider avoiding the partial blockchain clone by constructing `BlockBody` from
        // its raw parts, which does not require the partial blockchain.
        let new_chain_commitment = self.compute_chain_commitment();

        // Construct the block body from the proposed block.
        let body = BlockBody::from(self);

        // Construct the header.
        let tx_commitment = body.transaction_commitment();
        let prev_block_commitment = prev_block_header.commitment();

        // For now we copy the parameters of the previous header, which means the parameters set on
        // the genesis block will be passed through. Eventually, the contained base fees will be
        // updated based on the demand in the currently proposed block.
        let fee_parameters = prev_block_header.fee_parameters().clone();

        // Currently undefined and reserved for future use.
        // See miden-base/1155.
        let version = 0;
        let tx_kernel_commitment = TransactionKernel.to_commitment();
        let header = BlockHeader::new(
            version,
            prev_block_commitment,
            block_num,
            new_chain_commitment,
            new_account_root,
            new_nullifier_root,
            note_root,
            tx_commitment,
            tx_kernel_commitment,
            prev_block_header.validator_key().clone(),
            fee_parameters,
            timestamp,
        );

        Ok((header, body))
    }

    /// Consumes self and returns the non-[`Copy`] parts of the block.
    #[allow(clippy::type_complexity)]
    pub fn into_parts(
        self,
    ) -> (
        OrderedBatches,
        Vec<(AccountId, AccountUpdateWitness)>,
        Vec<OutputNoteBatch>,
        BTreeMap<Nullifier, NullifierWitness>,
        PartialBlockchain,
        BlockHeader,
    ) {
        (
            self.batches,
            self.account_updated_witnesses,
            self.output_note_batches,
            self.created_nullifiers,
            self.partial_blockchain,
            self.prev_block_header,
        )
    }
}

// SERIALIZATION
// ================================================================================================

impl Serializable for ProposedBlock {
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        self.batches.write_into(target);
        self.timestamp.write_into(target);
        self.account_updated_witnesses.write_into(target);
        self.output_note_batches.write_into(target);
        self.created_nullifiers.write_into(target);
        self.partial_blockchain.write_into(target);
        self.prev_block_header.write_into(target);
    }
}

impl Deserializable for ProposedBlock {
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        let block = Self {
            batches: OrderedBatches::read_from(source)?,
            timestamp: u32::read_from(source)?,
            account_updated_witnesses: <Vec<(AccountId, AccountUpdateWitness)>>::read_from(source)?,
            output_note_batches: <Vec<OutputNoteBatch>>::read_from(source)?,
            created_nullifiers: <BTreeMap<Nullifier, NullifierWitness>>::read_from(source)?,
            partial_blockchain: PartialBlockchain::read_from(source)?,
            prev_block_header: BlockHeader::read_from(source)?,
        };

        Ok(block)
    }
}

// HELPER FUNCTIONS
// ================================================================================================

fn check_duplicate_batches(batches: &[ProvenBatch]) -> Result<(), ProposedBlockError> {
    let mut batch_ids = BTreeSet::new();

    for batch in batches {
        if !batch_ids.insert(batch.id()) {
            return Err(ProposedBlockError::DuplicateBatch { batch_id: batch.id() });
        }
    }

    Ok(())
}

fn check_timestamp_increases_monotonically(
    provided_timestamp: u32,
    prev_block_header: &BlockHeader,
) -> Result<(), ProposedBlockError> {
    if provided_timestamp <= prev_block_header.timestamp() {
        Err(ProposedBlockError::TimestampDoesNotIncreaseMonotonically {
            provided_timestamp,
            previous_timestamp: prev_block_header.timestamp(),
        })
    } else {
        Ok(())
    }
}

/// Checks whether any of the batches is expired and can no longer be included in this block.
///
/// To illustrate, a batch which expired at block 4 cannot be included in block 5, but if it
/// expires at block 5 then it can still be included in block 5.
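///
/// A toy model of the cutoff with plain numbers, where 5 stands in for the proposed block's
/// number:
///
/// ```
/// let current_block_num = 5u32;
/// let batch_expirations = [4u32, 5, 9];
/// let expired_count = batch_expirations.iter().filter(|exp| **exp < current_block_num).count();
/// // Only the batch that expired at block 4 is rejected.
/// assert_eq!(expired_count, 1);
/// ```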
fn check_batch_expiration(
    batches: &[ProvenBatch],
    prev_block_header: &BlockHeader,
) -> Result<(), ProposedBlockError> {
    let current_block_num = prev_block_header.block_num() + 1;

    for batch in batches {
        if batch.batch_expiration_block_num() < current_block_num {
            return Err(ProposedBlockError::ExpiredBatch {
                batch_id: batch.id(),
                batch_expiration_block_num: batch.batch_expiration_block_num(),
                current_block_num,
            });
        }
    }

    Ok(())
}

/// Checks that each nullifier in the block has a proof provided and that the nullifier is
/// unspent. The proofs are required to update the nullifier tree.
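///
/// An unspent nullifier maps to the empty word in the nullifier tree; any other value means the
/// note was already consumed. A toy model with `u64` values, where 0 stands in for `EMPTY_WORD`:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let tree: BTreeMap<&str, u64> = BTreeMap::from([
///     ("nullifier_a", 0), // unspent
///     ("nullifier_b", 7), // spent
/// ]);
///
/// assert_eq!(tree.get("nullifier_a"), Some(&0)); // ok to consume
/// assert_ne!(tree.get("nullifier_b"), Some(&0)); // already spent -> error
/// assert_eq!(tree.get("nullifier_c"), None);     // proof missing -> error
/// ```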
fn check_nullifiers(
    nullifier_witnesses: &BTreeMap<Nullifier, NullifierWitness>,
    block_input_notes: impl Iterator<Item = Nullifier>,
) -> Result<(), ProposedBlockError> {
    for block_input_note in block_input_notes {
        match nullifier_witnesses
            .get(&block_input_note)
            .and_then(|x| x.proof().get(&block_input_note.as_word()))
        {
            Some(nullifier_value) => {
                if nullifier_value != EMPTY_WORD {
                    return Err(ProposedBlockError::NullifierSpent(block_input_note));
                }
            },
            // If the nullifier witnesses did not contain a proof for this nullifier or the
            // provided proof was not for this nullifier, then it's an error.
            None => return Err(ProposedBlockError::NullifierProofMissing(block_input_note)),
        }
    }

    Ok(())
}

/// Removes from the nullifier witnesses the nullifiers of erased notes (i.e. notes created and
/// consumed within the block).
fn remove_erased_nullifiers(
    nullifier_witnesses: &mut BTreeMap<Nullifier, NullifierWitness>,
    block_erased_notes: impl Iterator<Item = Nullifier>,
) {
    for erased_note in block_erased_notes {
        // We do not check that the nullifier was actually present to allow the block inputs to
        // not include a nullifier that is known to belong to an erased note.
        let _ = nullifier_witnesses.remove(&erased_note);
    }
}

/// Checks consistency between the previous block header and the provided partial blockchain.
///
/// This checks that:
/// - the chain length of the partial blockchain is equal to the block number of the previous block
///   header, i.e. the partial blockchain's latest block is the previous block's reference block.
///   The previous block header will be added to the partial blockchain as part of constructing the
///   current block.
/// - the root of the partial blockchain is equivalent to the chain commitment of the previous
///   block header.
fn check_reference_block_partial_blockchain_consistency(
    partial_blockchain: &PartialBlockchain,
    prev_block_header: &BlockHeader,
) -> Result<(), ProposedBlockError> {
    // Make sure that the current partial blockchain has blocks up to prev_block_header - 1, i.e.
    // its chain length is equal to the block number of the previous block header.
    if partial_blockchain.chain_length() != prev_block_header.block_num() {
        return Err(ProposedBlockError::ChainLengthNotEqualToPreviousBlockNumber {
            chain_length: partial_blockchain.chain_length(),
            prev_block_num: prev_block_header.block_num(),
        });
    }

    let chain_commitment = partial_blockchain.peaks().hash_peaks();
    if chain_commitment != prev_block_header.chain_commitment() {
        return Err(ProposedBlockError::ChainRootNotEqualToPreviousBlockChainCommitment {
            chain_commitment,
            prev_block_chain_commitment: prev_block_header.chain_commitment(),
            prev_block_num: prev_block_header.block_num(),
        });
    }

    Ok(())
}

/// Checks that each block referenced by a batch in the block has an entry in the partial
/// blockchain, unless the referenced block is the previous block itself, which this block
/// references directly.
fn check_batch_reference_blocks(
    partial_blockchain: &PartialBlockchain,
    prev_block_header: &BlockHeader,
    batches: &[ProvenBatch],
) -> Result<(), ProposedBlockError> {
    for batch in batches {
        let batch_reference_block_num = batch.reference_block_num();
        if batch_reference_block_num != prev_block_header.block_num()
            && !partial_blockchain.contains_block(batch.reference_block_num())
        {
            return Err(ProposedBlockError::BatchReferenceBlockMissingFromChain {
                reference_block_num: batch.reference_block_num(),
                batch_id: batch.id(),
            });
        }
    }

    Ok(())
}

/// Computes the block's output notes from the output notes of each batch in the block.
///
/// We pass in `block_output_notes`, which is the full set of output notes of the block, with
/// output notes erased that are consumed by some batch in the block.
///
/// The output notes of each proven batch, however, contain all the notes that it creates,
/// including ones that were potentially erased in `block_output_notes`. This means we have to
/// make the batch output notes consistent with `block_output_notes` by removing the erased notes,
/// so that they accurately represent the output notes the batch actually creates as part of the
/// block.
///
/// Returns the set of [`OutputNoteBatch`]es that each batch creates.
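///
/// A toy model of the bookkeeping with strings standing in for note IDs: batch 0 originally
/// created notes `n0` and `n1`, batch 1 created `n2`, and `n1` was consumed within the block, so
/// it is absent from the block-level map:
///
/// ```
/// use std::collections::BTreeMap;
///
/// // Surviving block-level output notes, keyed by note id (values stand in for the batch id).
/// let mut block_output_notes: BTreeMap<&str, &str> =
///     BTreeMap::from([("n0", "batch_0"), ("n2", "batch_1")]);
///
/// // Output notes as originally produced by each batch.
/// let batches = [vec!["n0", "n1"], vec!["n2"]];
///
/// let per_batch: Vec<Vec<(usize, &str)>> = batches
///     .iter()
///     .map(|notes| {
///         notes
///             .iter()
///             .enumerate()
///             // Keep the original index so the block note tree layout stays stable.
///             .filter(|(_, id)| block_output_notes.remove(*id).is_some())
///             .map(|(idx, id)| (idx, *id))
///             .collect()
///     })
///     .collect();
///
/// assert_eq!(per_batch, vec![vec![(0, "n0")], vec![(0, "n2")]]);
/// assert!(block_output_notes.is_empty());
/// ```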
fn compute_block_output_notes(
    batches: &[ProvenBatch],
    mut block_output_notes: BTreeMap<NoteId, (BatchId, OutputNote)>,
) -> Vec<OutputNoteBatch> {
    let mut block_output_note_batches = Vec::with_capacity(batches.len());

    for batch in batches.iter() {
        let batch_output_notes = compute_batch_output_notes(batch, &mut block_output_notes);
        block_output_note_batches.push(batch_output_notes);
    }

    block_output_note_batches
}

/// Computes the output notes of the given batch. This is essentially the batch's output notes
/// minus all erased notes.
///
/// If a note in the batch's output notes is not present in the block output notes map, it means it
/// was erased and should therefore not be added to the batch's output notes. If it is present, it
/// is added to the set of output notes of this batch.
///
/// The output note set is returned.
fn compute_batch_output_notes(
    batch: &ProvenBatch,
    block_output_notes: &mut BTreeMap<NoteId, (BatchId, OutputNote)>,
) -> OutputNoteBatch {
    // The length of the batch's output notes is an upper bound on how many notes the batch
    // could've produced, so we reserve that much space to avoid reallocation.
    let mut batch_output_notes = Vec::with_capacity(batch.output_notes().len());

    for (note_idx, original_output_note) in batch.output_notes().iter().enumerate() {
        // If block_output_notes no longer contains a note it means it was erased and we do not
        // include it in the output notes of the current batch. We include the original index of
        // the note in the batch so we can later correctly construct the block note tree. This
        // index is needed because we want to be able to construct the block note tree in two ways:
        // 1) by inserting the individual batch note trees (with erased notes removed) as subtrees
        // into an empty block note tree or 2) by iterating the set of `OutputNoteBatch`es. If we
        // did not store the index, then the second method would assume a contiguous layout of
        // output notes and result in a different tree than the first method.
        //
        // Note that because we disallow duplicate output notes, if this map contains the
        // original note id, then we can be certain it was created by this batch and should stay
        // in the tree. In other words, there is no ambiguity where a note originated from.
        if let Some((_batch_id, output_note)) =
            block_output_notes.remove(&original_output_note.id())
        {
            debug_assert_eq!(
                _batch_id,
                batch.id(),
                "batch that contained the note originally is no longer the batch that contains it according to the provided map"
            );
            batch_output_notes.push((note_idx, output_note));
        }
    }

    batch_output_notes
}

// ACCOUNT UPDATE AGGREGATOR
// ================================================================================================

struct AccountUpdateAggregator {
    /// The map from each account to the map of each of its updates, keyed by the state commitment
    /// from which the contained update starts.
    /// An invariant of this field is that if the outer map has an entry for some account, the
    /// inner update map is guaranteed to be non-empty.
    updates: BTreeMap<AccountId, BTreeMap<Word, (BatchAccountUpdate, BatchId)>>,
}

impl AccountUpdateAggregator {
    fn new() -> Self {
        Self { updates: BTreeMap::new() }
    }

    /// Aggregates all updates for the same account and stores each update indexed by its initial
    /// state commitment so we can easily retrieve them in the next step. This lets us
    /// chronologically order the updates per account across batches.
    fn from_batches(batches: &[ProvenBatch]) -> Result<Self, ProposedBlockError> {
        let mut update_aggregator = AccountUpdateAggregator::new();

        for batch in batches {
            for (account_id, update) in batch.account_updates() {
                update_aggregator.insert_update(*account_id, batch.id(), update.clone())?;
            }
        }

        Ok(update_aggregator)
    }

    /// Inserts the update from one batch for a specific account into the map of updates.
    fn insert_update(
        &mut self,
        account_id: AccountId,
        batch_id: BatchId,
        update: BatchAccountUpdate,
    ) -> Result<(), ProposedBlockError> {
        // As a special case, a NOOP transaction (i.e. one where the initial and final state
        // commitment is the same) can just be ignored without changing the outcome.
        // Without this early return, such a transaction would conflict with other state-updating
        // transactions, because there would be two transactions that update the account from
        // the same initial state commitment.
        if update.initial_state_commitment() == update.final_state_commitment() {
            return Ok(());
        };

        if let Some((conflicting_update, conflicting_batch_id)) = self
            .updates
            .entry(account_id)
            .or_default()
            .insert(update.initial_state_commitment(), (update, batch_id))
        {
            return Err(ProposedBlockError::ConflictingBatchesUpdateSameAccount {
                account_id,
                initial_state_commitment: conflicting_update.initial_state_commitment(),
                first_batch_id: conflicting_batch_id,
                second_batch_id: batch_id,
            });
        }

        Ok(())
    }

    /// Consumes self and aggregates the account updates from all contained accounts.
    /// For each updated account an entry in `account_witnesses` must be present.
    fn into_update_witnesses(
        self,
        mut account_witnesses: BTreeMap<AccountId, AccountWitness>,
    ) -> Result<Vec<(AccountId, AccountUpdateWitness)>, ProposedBlockError> {
        let mut account_update_witnesses = Vec::with_capacity(self.updates.len());

        for (account_id, updates_map) in self.updates {
            let witness = account_witnesses
                .remove(&account_id)
                .ok_or(ProposedBlockError::MissingAccountWitness(account_id))?;

            let account_update_witness = Self::aggregate_account(account_id, witness, updates_map)?;

            account_update_witnesses.push((account_id, account_update_witness));
        }

        Ok(account_update_witnesses)
    }

    /// Builds the update for a single account from the provided map of updates, where each entry
    /// is keyed by the state commitment from which the update starts. This chains updates for this
    /// account together in chronological order using the state commitments to link them.
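    ///
    /// A toy model of the chaining with `char`s standing in for state commitments: two batches
    /// produced the updates `A -> C` and `C -> F`, keyed by their initial state:
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// // initial state -> final state
    /// let mut updates: BTreeMap<char, char> = BTreeMap::from([('A', 'C'), ('C', 'F')]);
    ///
    /// let mut current = 'A'; // initial state commitment taken from the account witness
    /// while let Some(next) = updates.remove(&current) {
    ///     current = next;
    /// }
    ///
    /// // All updates chain together; a leftover entry would indicate an inconsistent state
    /// // transition (the error case below).
    /// assert!(updates.is_empty());
    /// assert_eq!(current, 'F');
    /// ```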
    fn aggregate_account(
        account_id: AccountId,
        initial_state_proof: AccountWitness,
        mut updates: BTreeMap<Word, (BatchAccountUpdate, BatchId)>,
    ) -> Result<AccountUpdateWitness, ProposedBlockError> {
        // The account witness could prove inclusion of a different ID in which case the initial
        // state commitment of the current ID is the empty word.
        let initial_state_commitment = if account_id == initial_state_proof.id() {
            initial_state_proof.state_commitment()
        } else {
            Word::empty()
        };

        let mut details: Option<AccountUpdateDetails> = None;

        let mut current_commitment = initial_state_commitment;
        while !updates.is_empty() {
            let (update, _) = updates.remove(&current_commitment).ok_or_else(|| {
                ProposedBlockError::InconsistentAccountStateTransition {
                    account_id,
                    state_commitment: current_commitment,
                    remaining_state_commitments: updates.keys().copied().collect(),
                }
            })?;

            current_commitment = update.final_state_commitment();
            let update_details = update.into_update();

            details = Some(match details {
                None => update_details,
                Some(details) => details.merge(update_details).map_err(|source| {
                    ProposedBlockError::AccountUpdateError { account_id, source: Box::new(source) }
                })?,
            });
        }

        Ok(AccountUpdateWitness::new(
            initial_state_commitment,
            current_commitment,
            initial_state_proof,
            details.expect("details should be Some as updates is guaranteed to not be empty"),
        ))
    }
}