// miden_protocol/block/proposed_block.rs
1use alloc::boxed::Box;
2use alloc::collections::{BTreeMap, BTreeSet};
3use alloc::vec::Vec;
4
5use crate::account::AccountId;
6use crate::account::delta::AccountUpdateDetails;
7use crate::batch::{
8 BatchAccountUpdate,
9 BatchId,
10 InputOutputNoteTracker,
11 OrderedBatches,
12 ProvenBatch,
13};
14use crate::block::account_tree::{AccountWitness, PartialAccountTree};
15use crate::block::block_inputs::BlockInputs;
16use crate::block::nullifier_tree::{NullifierWitness, PartialNullifierTree};
17use crate::block::{
18 AccountUpdateWitness,
19 BlockBody,
20 BlockHeader,
21 BlockNoteIndex,
22 BlockNoteTree,
23 BlockNumber,
24 OutputNoteBatch,
25};
26use crate::errors::ProposedBlockError;
27use crate::note::{NoteId, Nullifier};
28use crate::transaction::{
29 InputNoteCommitment,
30 OutputNote,
31 PartialBlockchain,
32 TransactionHeader,
33 TransactionKernel,
34};
35use crate::utils::serde::{
36 ByteReader,
37 ByteWriter,
38 Deserializable,
39 DeserializationError,
40 Serializable,
41};
42use crate::{EMPTY_WORD, MAX_BATCHES_PER_BLOCK, Word};
43
44// PROPOSED BLOCK
45// =================================================================================================
46
/// A proposed block with many, but not all constraints of a
/// [`ProvenBlock`](crate::block::ProvenBlock) enforced.
///
/// See [`ProposedBlock::new_at`] for details on the checks.
#[derive(Debug, Clone)]
pub struct ProposedBlock {
    /// The transaction batches in this block.
    batches: OrderedBatches,
    /// The unix timestamp of the block in seconds.
    timestamp: u32,
    /// All accounts' [`AccountUpdateWitness`]es that were updated in this block. See its docs for
    /// details.
    account_updated_witnesses: Vec<(AccountId, AccountUpdateWitness)>,
    /// Note batches created by the transactions in this block.
    ///
    /// These are the output notes after note erasure has been done, so they represent the actual
    /// output notes of the block.
    ///
    /// The length of this vector is guaranteed to be equal to the length of `batches` and the
    /// inner batch of output notes may be empty if a batch did not create any notes.
    output_note_batches: Vec<OutputNoteBatch>,
    /// The nullifiers created by this block.
    ///
    /// These are the nullifiers of all input notes after note erasure has been done, so these are
    /// the nullifiers of all _authenticated_ notes consumed in the block.
    created_nullifiers: BTreeMap<Nullifier, NullifierWitness>,
    /// The [`PartialBlockchain`] at the state of the previous block header. It is used to:
    /// - authenticate unauthenticated notes whose note inclusion proof references a block.
    /// - authenticate all reference blocks of the batches in this block.
    partial_blockchain: PartialBlockchain,
    /// The previous block's header which this block builds on top of.
    ///
    /// As part of proving the block, this header will be added to the next partial blockchain.
    prev_block_header: BlockHeader,
}
82
impl ProposedBlock {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------

    /// Creates a new proposed block from the provided [`BlockInputs`], transaction batches and
    /// timestamp.
    ///
    /// This checks most of the constraints of a block and computes most of the data structure
    /// updates except for the more expensive tree updates (nullifier, account and chain
    /// commitment).
    ///
    /// # Errors
    ///
    /// Returns an error if any of the following conditions are met.
    ///
    /// ## Batches
    ///
    /// - The number of batches exceeds [`MAX_BATCHES_PER_BLOCK`].
    /// - There are duplicate batches, i.e. they have the same [`BatchId`].
    /// - The expiration block number of any batch is less than the block number of the currently
    ///   proposed block.
    ///
    /// ## Chain
    ///
    /// - The length of the [`PartialBlockchain`] in the block inputs is not equal to the previous
    ///   block header in the block inputs.
    /// - The [`PartialBlockchain`]'s chain commitment is not equal to the
    ///   [`BlockHeader::chain_commitment`] of the previous block header.
    ///
    /// ## Notes
    ///
    /// Note that, in the following, the set of authenticated notes includes unauthenticated notes
    /// that have been authenticated.
    ///
    /// - The union of all input notes across all batches contain duplicates.
    /// - The union of all output notes across all batches contain duplicates.
    /// - There is an unauthenticated input note and an output note with the same note ID but their
    ///   note commitments are different (i.e. their metadata is different).
    /// - There is a note inclusion proof for an unauthenticated note whose referenced block is not
    ///   in the [`PartialBlockchain`].
    /// - The note inclusion proof for an unauthenticated note is invalid.
    /// - There are any unauthenticated notes for which no note inclusion proof is provided.
    /// - A [`NullifierWitness`] is missing for an authenticated note.
    /// - If the [`NullifierWitness`] for an authenticated note proves that the note was already
    ///   consumed.
    ///
    /// ## Accounts
    ///
    /// - An [`AccountWitness`] is missing for an account updated by a batch.
    /// - Any two batches update the same account from the same state. For example, if batch 1
    ///   updates some account from state A to B and batch 2 updates it from A to F, then those
    ///   batches conflict as they both start from the same initial state but produce a fork in the
    ///   account's state.
    /// - Account updates from different batches cannot be brought in a contiguous order. For
    ///   example, if a batch 1 updates an account from state A to C, and a batch 2 updates it from
    ///   D to F, then the state transition from C to D is missing. Note that this does not mean,
    ///   that batches must be provided in an order where account updates chain together in the
    ///   order of the batches, which would generally be an impossible requirement to fulfill.
    /// - Account updates cannot be merged, i.e. if [`AccountUpdateDetails::merge`] fails on the
    ///   updates from two batches.
    ///
    /// ## Time
    ///
    /// - The given `timestamp` does not increase monotonically compared to the previous block
    ///   header's timestamp.
    pub fn new_at(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
        timestamp: u32,
    ) -> Result<Self, ProposedBlockError> {
        // Check for duplicate and max number of batches.
        // --------------------------------------------------------------------------------------------

        if batches.len() > MAX_BATCHES_PER_BLOCK {
            return Err(ProposedBlockError::TooManyBatches);
        }

        check_duplicate_batches(&batches)?;

        // Check timestamp increases monotonically.
        // --------------------------------------------------------------------------------------------

        check_timestamp_increases_monotonically(timestamp, block_inputs.prev_block_header())?;

        // Check for batch expiration.
        // --------------------------------------------------------------------------------------------

        check_batch_expiration(&batches, block_inputs.prev_block_header())?;

        // Check for consistency between the partial blockchain and the referenced previous block.
        // --------------------------------------------------------------------------------------------

        check_reference_block_partial_blockchain_consistency(
            block_inputs.partial_blockchain(),
            block_inputs.prev_block_header(),
        )?;

        // Check every block referenced by a batch is in the partial blockchain.
        // --------------------------------------------------------------------------------------------

        check_batch_reference_blocks(
            block_inputs.partial_blockchain(),
            block_inputs.prev_block_header(),
            &batches,
        )?;

        // Check for duplicates in the input and output notes and compute the input and output notes
        // of the block by erasing notes that are created and consumed within this block as well as
        // authenticating unauthenticated notes.
        // --------------------------------------------------------------------------------------------

        let (block_input_notes, block_erased_notes, block_output_notes) =
            InputOutputNoteTracker::from_batches(
                batches.iter(),
                block_inputs.unauthenticated_note_proofs(),
                block_inputs.partial_blockchain(),
                block_inputs.prev_block_header(),
            )?;

        // All unauthenticated notes must be erased or authenticated by now.
        if let Some(nullifier) = block_input_notes
            .iter()
            .find_map(|note| (!note.is_authenticated()).then_some(note.nullifier()))
        {
            return Err(ProposedBlockError::UnauthenticatedNoteConsumed { nullifier });
        }

        // Check for nullifiers proofs and unspent nullifiers.
        // --------------------------------------------------------------------------------------------

        let (prev_block_header, partial_blockchain, account_witnesses, mut nullifier_witnesses, _) =
            block_inputs.into_parts();

        // Remove nullifiers of erased notes, so we only add the nullifiers of actual input notes to
        // the proposed block.
        remove_erased_nullifiers(&mut nullifier_witnesses, block_erased_notes.into_iter());

        // Check against computed block_input_notes which also contain unauthenticated notes that
        // have been authenticated.
        check_nullifiers(
            &nullifier_witnesses,
            block_input_notes.iter().map(InputNoteCommitment::nullifier),
        )?;

        // Aggregate account updates across batches.
        // --------------------------------------------------------------------------------------------

        let aggregator = AccountUpdateAggregator::from_batches(&batches)?;
        let account_updated_witnesses = aggregator.into_update_witnesses(account_witnesses)?;

        // Compute the block's output note batches from the individual batch output notes.
        // --------------------------------------------------------------------------------------------

        let output_note_batches = compute_block_output_notes(&batches, block_output_notes);

        // Build proposed blocks from parts.
        // --------------------------------------------------------------------------------------------

        Ok(Self {
            batches: OrderedBatches::new(batches),
            timestamp,
            account_updated_witnesses,
            output_note_batches,
            created_nullifiers: nullifier_witnesses,
            partial_blockchain,
            prev_block_header,
        })
    }

    /// Creates a new proposed block from the provided [`BlockInputs`] and transaction batches.
    ///
    /// Equivalent to [`ProposedBlock::new_at`] except that the timestamp of the proposed block is
    /// set to the current system time or the previous block header's timestamp + 1, whichever
    /// is greater. This guarantees that the timestamp increases monotonically.
    ///
    /// See the [`ProposedBlock::new_at`] for details on errors and other constraints.
    #[cfg(feature = "std")]
    pub fn new(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
    ) -> Result<Self, ProposedBlockError> {
        let timestamp_now: u32 = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("now should be after 1970")
            .as_secs()
            .try_into()
            .expect("timestamp should fit in a u32 before the year 2106");

        // NOTE(review): `timestamp() + 1` would overflow in release builds if the previous
        // header's timestamp were u32::MAX (year 2106); presumably accepted, like the expect
        // above — confirm.
        let timestamp = timestamp_now.max(block_inputs.prev_block_header().timestamp() + 1);

        Self::new_at(block_inputs, batches, timestamp)
    }

    // ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns the block number of this proposed block.
    pub fn block_num(&self) -> BlockNumber {
        // The chain length is the length at the state of the previous block header, so we have to
        // add one.
        self.partial_blockchain().chain_length() + 1
    }

    /// Returns a reference to the previous block header that this block builds on top of.
    pub fn prev_block_header(&self) -> &BlockHeader {
        &self.prev_block_header
    }

    /// Returns the [`PartialBlockchain`] that this block contains.
    pub fn partial_blockchain(&self) -> &PartialBlockchain {
        &self.partial_blockchain
    }

    /// Returns a reference to the slice of transaction batches in this block.
    pub fn batches(&self) -> &OrderedBatches {
        &self.batches
    }

    /// Returns an iterator over all transactions in the block.
    pub fn transactions(&self) -> impl Iterator<Item = &TransactionHeader> {
        self.batches
            .as_slice()
            .iter()
            .flat_map(|batch| batch.transactions().as_slice().iter())
    }

    /// Returns the map of nullifiers to their proofs from the proposed block.
    pub fn created_nullifiers(&self) -> &BTreeMap<Nullifier, NullifierWitness> {
        &self.created_nullifiers
    }

    /// Returns a reference to the slice of accounts updated in this block.
    pub fn updated_accounts(&self) -> &[(AccountId, AccountUpdateWitness)] {
        &self.account_updated_witnesses
    }

    /// Returns a slice of the [`OutputNoteBatch`] of each batch in this block.
    pub fn output_note_batches(&self) -> &[OutputNoteBatch] {
        &self.output_note_batches
    }

    /// Returns the timestamp of this block.
    pub fn timestamp(&self) -> u32 {
        self.timestamp
    }

    // COMMITMENT COMPUTATIONS
    // --------------------------------------------------------------------------------------------

    /// Computes the new account tree root after the given updates.
    pub fn compute_account_root(&self) -> Result<Word, ProposedBlockError> {
        // If no accounts were updated, the account tree root is unchanged.
        if self.account_updated_witnesses.is_empty() {
            return Ok(self.prev_block_header.account_root());
        }

        // First reconstruct the current account tree from the provided merkle paths.
        // If a witness points to a leaf where multiple account IDs share the same prefix, this will
        // return an error.
        let mut partial_account_tree = PartialAccountTree::with_witnesses(
            self.account_updated_witnesses
                .iter()
                .map(|(_, update_witness)| update_witness.to_witness()),
        )
        .map_err(|source| ProposedBlockError::AccountWitnessTracking { source })?;

        // Check the account tree root in the previous block header matches the reconstructed tree's
        // root.
        if self.prev_block_header.account_root() != partial_account_tree.root() {
            return Err(ProposedBlockError::StaleAccountTreeRoot {
                prev_block_account_root: self.prev_block_header.account_root(),
                stale_account_root: partial_account_tree.root(),
            });
        }

        // Second, update the account tree by inserting the new final account state commitments to
        // compute the new root of the account tree.
        // If an account ID's prefix already exists in the tree, this will return an error.
        // Note that we have inserted all witnesses that we want to update into the partial account
        // tree, so we should not run into the untracked key error.
        partial_account_tree
            .upsert_state_commitments(self.account_updated_witnesses.iter().map(
                |(account_id, update_witness)| {
                    (*account_id, update_witness.final_state_commitment())
                },
            ))
            .map_err(|source| ProposedBlockError::AccountIdPrefixDuplicate { source })?;

        Ok(partial_account_tree.root())
    }

    /// Computes the new nullifier root by inserting the nullifier witnesses into a partial
    /// nullifier tree and marking each nullifier as spent in the given block number.
    pub fn compute_nullifier_root(&self) -> Result<Word, ProposedBlockError> {
        // If no nullifiers were created, the nullifier tree root is unchanged.
        if self.created_nullifiers.is_empty() {
            return Ok(self.prev_block_header.nullifier_root());
        }

        // First, reconstruct the current nullifier tree with the merkle paths of the nullifiers we
        // want to update.
        // Due to the guarantees of ProposedBlock we can safely assume that each nullifier is mapped
        // to its corresponding nullifier witness, so we don't have to check again whether
        // they match.
        let mut partial_nullifier_tree =
            PartialNullifierTree::with_witnesses(self.created_nullifiers().values().cloned())
                .map_err(ProposedBlockError::NullifierWitnessRootMismatch)?;

        // Check the nullifier tree root in the previous block header matches the reconstructed
        // tree's root.
        if self.prev_block_header.nullifier_root() != partial_nullifier_tree.root() {
            return Err(ProposedBlockError::StaleNullifierTreeRoot {
                prev_block_nullifier_root: self.prev_block_header.nullifier_root(),
                stale_nullifier_root: partial_nullifier_tree.root(),
            });
        }

        // Second, mark each nullifier as spent in the tree. Note that checking whether each
        // nullifier is unspent is checked as part of constructing the proposed block.

        // SAFETY: As mentioned above, we can safely assume that each nullifier's witness was
        // added and every nullifier should be tracked by the partial tree and
        // therefore updatable.
        partial_nullifier_tree
            .mark_spent_all(self.created_nullifiers.keys().copied(), self.block_num())
            .expect("nullifiers' merkle path should have been added to the partial tree and the nullifiers should be unspent");

        Ok(partial_nullifier_tree.root())
    }

    /// Compute the block note tree from the output note batches.
    pub fn compute_block_note_tree(&self) -> BlockNoteTree {
        let output_notes_iter =
            self.output_note_batches.iter().enumerate().flat_map(|(batch_idx, notes)| {
                notes.iter().map(move |(note_idx_in_batch, note)| {
                    (
                        // SAFETY: The proposed block contains at most the max allowed number of
                        // batches and each batch is guaranteed to contain at most
                        // the max allowed number of output notes.
                        BlockNoteIndex::new(batch_idx, *note_idx_in_batch).expect(
                            "max batches in block and max notes in batches should be enforced",
                        ),
                        note.id(),
                        note.metadata(),
                    )
                })
            });

        // SAFETY: We only construct proposed blocks that:
        // - do not contain duplicates
        // - contain at most the max allowed number of batches and each batch is guaranteed to
        //   contain at most the max allowed number of output notes.
        BlockNoteTree::with_entries(output_notes_iter)
            .expect("the output notes of the block should not contain duplicates and contain at most the allowed maximum")
    }

    /// Adds the commitment of the previous block header to the partial blockchain to compute the
    /// new chain commitment.
    pub fn compute_chain_commitment(&self) -> Word {
        let mut partial_blockchain = self.partial_blockchain.clone();
        // SAFETY: This does not panic as long as the block header we're adding is the next one in
        // the chain which is validated as part of constructing a `ProposedBlock`.
        partial_blockchain.add_block(&self.prev_block_header, true);
        partial_blockchain.peaks().hash_peaks()
    }

    // STATE MUTATORS
    // --------------------------------------------------------------------------------------------

    /// Builds a [`BlockHeader`] and [`BlockBody`] by computing the following from the state
    /// updates encapsulated by the provided [`ProposedBlock`]:
    /// - the account root;
    /// - the nullifier root;
    /// - the note root;
    /// - the transaction commitment; and
    /// - the chain commitment.
    ///
    /// The returned block header contains the same validator public key as the previous block, as
    /// provided by the proposed block.
    ///
    /// # Errors
    ///
    /// Returns an error if any of the following conditions are met.
    ///
    /// ## Account Tree
    ///
    /// - An account witness cannot be used to reconstruct the partial account tree (e.g. it points
    ///   to a leaf where multiple account IDs share the same prefix).
    /// - The account root in the previous block header does not match the root of the reconstructed
    ///   partial account tree (stale account tree root).
    /// - An account ID's prefix already exists in the tree when inserting the new state commitments
    ///   (duplicate account ID prefix).
    ///
    /// ## Nullifier Tree
    ///
    /// - The nullifier witnesses cannot be used to reconstruct the partial nullifier tree (root
    ///   mismatch between witnesses).
    /// - The nullifier root in the previous block header does not match the root of the
    ///   reconstructed partial nullifier tree (stale nullifier tree root).
    pub fn into_header_and_body(self) -> Result<(BlockHeader, BlockBody), ProposedBlockError> {
        // Get fields from the proposed block before it is consumed.
        let block_num = self.block_num();
        let timestamp = self.timestamp();
        let prev_block_header = self.prev_block_header().clone();

        // Insert the state commitments of updated accounts into the account tree to compute its new
        // root.
        let new_account_root = self.compute_account_root()?;

        // Insert the created nullifiers into the nullifier tree to compute its new root.
        let new_nullifier_root = self.compute_nullifier_root()?;

        // Compute the root of the block note tree.
        let note_tree = self.compute_block_note_tree();
        let note_root = note_tree.root();

        // Insert the previous block header into the block partial blockchain to get the new chain
        // commitment.
        // TODO: Consider avoiding the partial blockchain clone by constructing `BlockBody` from its
        // raw parts, which does not require the partial blockchain.
        let new_chain_commitment = self.compute_chain_commitment();

        // Construct the block body from the proposed block.
        let body = BlockBody::from(self);

        // Construct the header.
        let tx_commitment = body.transaction_commitment();
        let prev_block_commitment = prev_block_header.commitment();

        // For now we copy the parameters of the previous header, which means the parameters set on
        // the genesis block will be passed through. Eventually, the contained base fees will be
        // updated based on the demand in the currently proposed block.
        let fee_parameters = prev_block_header.fee_parameters().clone();

        // Currently undefined and reserved for future use.
        // See https://github.com/0xMiden/protocol/issues/1155.
        let version = 0;
        let tx_kernel_commitment = TransactionKernel.to_commitment();
        let header = BlockHeader::new(
            version,
            prev_block_commitment,
            block_num,
            new_chain_commitment,
            new_account_root,
            new_nullifier_root,
            note_root,
            tx_commitment,
            tx_kernel_commitment,
            prev_block_header.validator_key().clone(),
            fee_parameters,
            timestamp,
        );

        Ok((header, body))
    }

    /// Consumes self and returns the non-[`Copy`] parts of the block.
    #[allow(clippy::type_complexity)]
    pub fn into_parts(
        self,
    ) -> (
        OrderedBatches,
        Vec<(AccountId, AccountUpdateWitness)>,
        Vec<OutputNoteBatch>,
        BTreeMap<Nullifier, NullifierWitness>,
        PartialBlockchain,
        BlockHeader,
    ) {
        (
            self.batches,
            self.account_updated_witnesses,
            self.output_note_batches,
            self.created_nullifiers,
            self.partial_blockchain,
            self.prev_block_header,
        )
    }
}
561
562// SERIALIZATION
563// ================================================================================================
564
impl Serializable for ProposedBlock {
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        // NOTE: The field order here defines the wire format and must stay in sync with
        // `Deserializable::read_from` for `ProposedBlock`.
        self.batches.write_into(target);
        self.timestamp.write_into(target);
        self.account_updated_witnesses.write_into(target);
        self.output_note_batches.write_into(target);
        self.created_nullifiers.write_into(target);
        self.partial_blockchain.write_into(target);
        self.prev_block_header.write_into(target);
    }
}
576
577impl Deserializable for ProposedBlock {
578 fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
579 let block = Self {
580 batches: OrderedBatches::read_from(source)?,
581 timestamp: u32::read_from(source)?,
582 account_updated_witnesses: <Vec<(AccountId, AccountUpdateWitness)>>::read_from(source)?,
583 output_note_batches: <Vec<OutputNoteBatch>>::read_from(source)?,
584 created_nullifiers: <BTreeMap<Nullifier, NullifierWitness>>::read_from(source)?,
585 partial_blockchain: PartialBlockchain::read_from(source)?,
586 prev_block_header: BlockHeader::read_from(source)?,
587 };
588
589 Ok(block)
590 }
591}
592
593// HELPER FUNCTIONS
594// ================================================================================================
595
596fn check_duplicate_batches(batches: &[ProvenBatch]) -> Result<(), ProposedBlockError> {
597 let mut input_note_set = BTreeSet::new();
598
599 for batch in batches {
600 if !input_note_set.insert(batch.id()) {
601 return Err(ProposedBlockError::DuplicateBatch { batch_id: batch.id() });
602 }
603 }
604
605 Ok(())
606}
607
608fn check_timestamp_increases_monotonically(
609 provided_timestamp: u32,
610 prev_block_header: &BlockHeader,
611) -> Result<(), ProposedBlockError> {
612 if provided_timestamp <= prev_block_header.timestamp() {
613 Err(ProposedBlockError::TimestampDoesNotIncreaseMonotonically {
614 provided_timestamp,
615 previous_timestamp: prev_block_header.timestamp(),
616 })
617 } else {
618 Ok(())
619 }
620}
621
622/// Checks whether any of the batches is expired and can no longer be included in this block.
623///
624/// To illustrate, a batch which expired at block 4 cannot be included in block 5, but if it
625/// expires at block 5 then it can still be included in block 5.
626fn check_batch_expiration(
627 batches: &[ProvenBatch],
628 prev_block_header: &BlockHeader,
629) -> Result<(), ProposedBlockError> {
630 let current_block_num = prev_block_header.block_num() + 1;
631
632 for batch in batches {
633 if batch.batch_expiration_block_num() < current_block_num {
634 return Err(ProposedBlockError::ExpiredBatch {
635 batch_id: batch.id(),
636 batch_expiration_block_num: batch.batch_expiration_block_num(),
637 current_block_num,
638 });
639 }
640 }
641
642 Ok(())
643}
644
645/// Check that each nullifier in the block has a proof provided and that the nullifier is
646/// unspent. The proofs are required to update the nullifier tree.
647fn check_nullifiers(
648 nullifier_witnesses: &BTreeMap<Nullifier, NullifierWitness>,
649 block_input_notes: impl Iterator<Item = Nullifier>,
650) -> Result<(), ProposedBlockError> {
651 for block_input_note in block_input_notes {
652 match nullifier_witnesses
653 .get(&block_input_note)
654 .and_then(|x| x.proof().get(&block_input_note.as_word()))
655 {
656 Some(nullifier_value) => {
657 if nullifier_value != EMPTY_WORD {
658 return Err(ProposedBlockError::NullifierSpent(block_input_note));
659 }
660 },
661 // If the nullifier witnesses did not contain a proof for this nullifier or the provided
662 // proof was not for this nullifier, then it's an error.
663 None => return Err(ProposedBlockError::NullifierProofMissing(block_input_note)),
664 }
665 }
666
667 Ok(())
668}
669
670/// Removes the nullifiers from the nullifier witnesses that were erased (i.e. created and consumed
671/// within the block).
672fn remove_erased_nullifiers(
673 nullifier_witnesses: &mut BTreeMap<Nullifier, NullifierWitness>,
674 block_erased_notes: impl Iterator<Item = Nullifier>,
675) {
676 for erased_note in block_erased_notes {
677 // We do not check that the nullifier was actually present to allow the block inputs to
678 // not include a nullifier that is known to belong to an erased note.
679 let _ = nullifier_witnesses.remove(&erased_note);
680 }
681}
682
683/// Checks consistency between the previous block header and the provided partial blockchain.
684///
685/// This checks that:
686/// - the chain length of the partial blockchain is equal to the block number of the previous block
687/// header, i.e. the partial blockchain's latest block is the previous' blocks reference block.
688/// The previous block header will be added to the partial blockchain as part of constructing the
689/// current block.
690/// - the root of the partial blockchain is equivalent to the chain commitment of the previous block
691/// header.
692fn check_reference_block_partial_blockchain_consistency(
693 partial_blockchain: &PartialBlockchain,
694 prev_block_header: &BlockHeader,
695) -> Result<(), ProposedBlockError> {
696 // Make sure that the current partial blockchain has blocks up to prev_block_header - 1, i.e.
697 // its chain length is equal to the block number of the previous block header.
698 if partial_blockchain.chain_length() != prev_block_header.block_num() {
699 return Err(ProposedBlockError::ChainLengthNotEqualToPreviousBlockNumber {
700 chain_length: partial_blockchain.chain_length(),
701 prev_block_num: prev_block_header.block_num(),
702 });
703 }
704
705 let chain_commitment = partial_blockchain.peaks().hash_peaks();
706 if chain_commitment != prev_block_header.chain_commitment() {
707 return Err(ProposedBlockError::ChainRootNotEqualToPreviousBlockChainCommitment {
708 chain_commitment,
709 prev_block_chain_commitment: prev_block_header.chain_commitment(),
710 prev_block_num: prev_block_header.block_num(),
711 });
712 }
713
714 Ok(())
715}
716
717/// Check that each block referenced by a batch in the block has an entry in the partial blockchain,
718/// except if the referenced block is the same as the previous block, referenced by the block.
719fn check_batch_reference_blocks(
720 partial_blockchain: &PartialBlockchain,
721 prev_block_header: &BlockHeader,
722 batches: &[ProvenBatch],
723) -> Result<(), ProposedBlockError> {
724 for batch in batches {
725 let batch_reference_block_num = batch.reference_block_num();
726 if batch_reference_block_num != prev_block_header.block_num()
727 && !partial_blockchain.contains_block(batch.reference_block_num())
728 {
729 return Err(ProposedBlockError::BatchReferenceBlockMissingFromChain {
730 reference_block_num: batch.reference_block_num(),
731 batch_id: batch.id(),
732 });
733 }
734 }
735
736 Ok(())
737}
738
739/// Computes the block's output notes from the batches of notes of each batch in the block.
740///
741/// We pass in `block_output_notes` which is the full set of output notes of the block, with output
742/// notes erased that are consumed by some batch in the block.
743///
744/// The batch output notes of each proven batch however contain all the notes that it creates,
745/// including ones that were potentially erased in `block_output_notes`. This means we have to
746/// make the batch output notes consistent with `block_output_notes` by removing the erased notes.
747/// Then it accurately represents what output notes the batch actually creates as part of the block.
748///
749/// Returns the set of [`OutputNoteBatch`]es that each batch creates.
750fn compute_block_output_notes(
751 batches: &[ProvenBatch],
752 mut block_output_notes: BTreeMap<NoteId, (BatchId, OutputNote)>,
753) -> Vec<OutputNoteBatch> {
754 let mut block_output_note_batches = Vec::with_capacity(batches.len());
755
756 for batch in batches.iter() {
757 let batch_output_notes = compute_batch_output_notes(batch, &mut block_output_notes);
758 block_output_note_batches.push(batch_output_notes);
759 }
760
761 block_output_note_batches
762}
763
764/// Computes the output note of the given batch. This is essentially the batch's output notes minus
765/// all erased notes.
766///
767/// If a note in the batch's output notes is not present in the block output notes map it means it
768/// was erased and should therefore not be added to the batch's output notes. If it is present, it
769/// is added to the set of output notes of this batch.
770///
771/// The output note set is returned.
772fn compute_batch_output_notes(
773 batch: &ProvenBatch,
774 block_output_notes: &mut BTreeMap<NoteId, (BatchId, OutputNote)>,
775) -> OutputNoteBatch {
776 // The len of the batch output notes is an upper bound of how many notes the batch could've
777 // produced so we reserve that much space to avoid reallocation.
778 let mut batch_output_notes = Vec::with_capacity(batch.output_notes().len());
779
780 for (note_idx, original_output_note) in batch.output_notes().iter().enumerate() {
781 // If block_output_notes no longer contains a note it means it was erased and we do not
782 // include it in the output notes of the current batch. We include the original index of the
783 // note in the batch so we can later correctly construct the block note tree. This index is
784 // needed because we want to be able to construct the block note tree in two ways: 1) By
785 // inserting the individual batch note trees (with erased notes removed) as subtrees into an
786 // empty block note tree or 2) by iterating the set `OutputNoteBatch`es. If we did not store
787 // the index, then the second method would assume a contiguous layout of output notes and
788 // result in a different tree than the first method.
789 //
790 // Note that because we disallow duplicate output notes, if this map contains the
791 // original note id, then we can be certain it was created by this batch and should stay
792 // in the tree. In other words, there is no ambiguity where a note originated from.
793 if let Some((_batch_id, output_note)) =
794 block_output_notes.remove(&original_output_note.id())
795 {
796 debug_assert_eq!(
797 _batch_id,
798 batch.id(),
799 "batch that contained the note originally is no longer the batch that contains it according to the provided map"
800 );
801 batch_output_notes.push((note_idx, output_note));
802 }
803 }
804
805 batch_output_notes
806}
807
808// ACCOUNT UPDATE AGGREGATOR
809// ================================================================================================
810
/// Collects the account updates of all batches in a proposed block, keyed by account and by the
/// state commitment each update starts from, so the updates of one account can later be chained
/// into a single chronological update (see the `impl` below).
struct AccountUpdateAggregator {
    /// The map from each account to the map of each of its updates, where the digest is the state
    /// commitment from which the contained update starts.
    /// An invariant of this field is that if the outer map has an entry for some account, the
    /// inner update map is guaranteed to not be empty as well.
    updates: BTreeMap<AccountId, BTreeMap<Word, (BatchAccountUpdate, BatchId)>>,
}
818
819impl AccountUpdateAggregator {
820 fn new() -> Self {
821 Self { updates: BTreeMap::new() }
822 }
823
824 /// Aggregates all updates for the same account and stores each update indexed by its initial
825 /// state commitment so we can easily retrieve them in the next step. This lets us
826 /// chronologically order the updates per account across batches.
827 fn from_batches(batches: &[ProvenBatch]) -> Result<Self, ProposedBlockError> {
828 let mut update_aggregator = AccountUpdateAggregator::new();
829
830 for batch in batches {
831 for (account_id, update) in batch.account_updates() {
832 update_aggregator.insert_update(*account_id, batch.id(), update.clone())?;
833 }
834 }
835
836 Ok(update_aggregator)
837 }
838
839 /// Inserts the update from one batch for a specific account into the map of updates.
840 fn insert_update(
841 &mut self,
842 account_id: AccountId,
843 batch_id: BatchId,
844 update: BatchAccountUpdate,
845 ) -> Result<(), ProposedBlockError> {
846 // As a special case, a NOOP transaction (i.e. one where the initial and final state
847 // commitment is the same) can just be ignored without changing the outcome.
848 // Without this early return, such a transaction would conflict with other state-updating
849 // transactions, because there would be two transactions that update the account from
850 // the same initial state commitment.
851 if update.initial_state_commitment() == update.final_state_commitment() {
852 return Ok(());
853 };
854
855 if let Some((conflicting_update, conflicting_batch_id)) = self
856 .updates
857 .entry(account_id)
858 .or_default()
859 .insert(update.initial_state_commitment(), (update, batch_id))
860 {
861 return Err(ProposedBlockError::ConflictingBatchesUpdateSameAccount {
862 account_id,
863 initial_state_commitment: conflicting_update.initial_state_commitment(),
864 first_batch_id: conflicting_batch_id,
865 second_batch_id: batch_id,
866 });
867 }
868
869 Ok(())
870 }
871
872 /// Consumes self and aggregates the account updates from all contained accounts.
873 /// For each updated account an entry in `account_witnesses` must be present.
874 fn into_update_witnesses(
875 self,
876 mut account_witnesses: BTreeMap<AccountId, AccountWitness>,
877 ) -> Result<Vec<(AccountId, AccountUpdateWitness)>, ProposedBlockError> {
878 let mut account_update_witnesses = Vec::with_capacity(self.updates.len());
879
880 for (account_id, updates_map) in self.updates {
881 let witness = account_witnesses
882 .remove(&account_id)
883 .ok_or(ProposedBlockError::MissingAccountWitness(account_id))?;
884
885 let account_update_witness = Self::aggregate_account(account_id, witness, updates_map)?;
886
887 account_update_witnesses.push((account_id, account_update_witness));
888 }
889
890 Ok(account_update_witnesses)
891 }
892
893 /// Build the update for a single account from the provided map of updates, where each entry is
894 /// the state from which the update starts. This chains updates for this account together in a
895 /// chronological order using the state commitments to link them.
896 fn aggregate_account(
897 account_id: AccountId,
898 initial_state_proof: AccountWitness,
899 mut updates: BTreeMap<Word, (BatchAccountUpdate, BatchId)>,
900 ) -> Result<AccountUpdateWitness, ProposedBlockError> {
901 // The account witness could prove inclusion of a different ID in which case the initial
902 // state commitment of the current ID is the empty word.
903 let initial_state_commitment = if account_id == initial_state_proof.id() {
904 initial_state_proof.state_commitment()
905 } else {
906 Word::empty()
907 };
908
909 let mut details: Option<AccountUpdateDetails> = None;
910
911 let mut current_commitment = initial_state_commitment;
912 while !updates.is_empty() {
913 let (update, _) = updates.remove(¤t_commitment).ok_or_else(|| {
914 ProposedBlockError::InconsistentAccountStateTransition {
915 account_id,
916 state_commitment: current_commitment,
917 remaining_state_commitments: updates.keys().copied().collect(),
918 }
919 })?;
920
921 current_commitment = update.final_state_commitment();
922 let update_details = update.into_update();
923
924 details = Some(match details {
925 None => update_details,
926 Some(details) => details.merge(update_details).map_err(|source| {
927 ProposedBlockError::AccountUpdateError { account_id, source: Box::new(source) }
928 })?,
929 });
930 }
931
932 Ok(AccountUpdateWitness::new(
933 initial_state_commitment,
934 current_commitment,
935 initial_state_proof,
936 details.expect("details should be Some as updates is guaranteed to not be empty"),
937 ))
938 }
939}