miden_objects/block/
proposed_block.rs

1use alloc::{
2    boxed::Box,
3    collections::{BTreeMap, BTreeSet},
4    vec::Vec,
5};
6
7use crate::{
8    Digest, EMPTY_WORD, MAX_BATCHES_PER_BLOCK,
9    account::{AccountId, delta::AccountUpdateDetails},
10    batch::{BatchAccountUpdate, BatchId, InputOutputNoteTracker, OrderedBatches, ProvenBatch},
11    block::{
12        AccountUpdateWitness, AccountWitness, BlockHeader, BlockNumber, NullifierWitness,
13        OutputNoteBatch, block_inputs::BlockInputs,
14    },
15    errors::ProposedBlockError,
16    note::{NoteId, Nullifier},
17    transaction::{InputNoteCommitment, OutputNote, PartialBlockchain, TransactionHeader},
18    utils::serde::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
19};
20
21// PROPOSED BLOCK
22// =================================================================================================
23
/// A proposed block with many, but not all constraints of a
/// [`ProvenBlock`](crate::block::ProvenBlock) enforced.
///
/// In particular, the expensive tree updates (nullifier, account and chain commitment trees)
/// are deferred to the proving step; this type carries the witnesses needed to perform them.
///
/// See [`ProposedBlock::new_at`] for details on the checks.
#[derive(Debug, Clone)]
pub struct ProposedBlock {
    /// The transaction batches in this block.
    batches: OrderedBatches,
    /// The unix timestamp of the block in seconds.
    timestamp: u32,
    /// All account's [`AccountUpdateWitness`] that were updated in this block. See its docs for
    /// details.
    account_updated_witnesses: Vec<(AccountId, AccountUpdateWitness)>,
    /// Note batches created by the transactions in this block.
    ///
    /// These are the output notes after note erasure has been done, so they represent the actual
    /// output notes of the block.
    ///
    /// The length of this vector is guaranteed to be equal to the length of `batches` and the
    /// inner batch of output notes may be empty if a batch did not create any notes.
    output_note_batches: Vec<OutputNoteBatch>,
    /// The nullifiers created by this block.
    ///
    /// These are the nullifiers of all input notes after note erasure has been done, so these are
    /// the nullifiers of all _authenticated_ notes consumed in the block.
    created_nullifiers: BTreeMap<Nullifier, NullifierWitness>,
    /// The [`PartialBlockchain`] at the state of the previous block header. It is used to:
    /// - authenticate unauthenticated notes whose note inclusion proof references a block.
    /// - authenticate all reference blocks of the batches in this block.
    partial_blockchain: PartialBlockchain,
    /// The previous block's header which this block builds on top of.
    ///
    /// As part of proving the block, this header will be added to the next partial blockchain.
    prev_block_header: BlockHeader,
}
59
impl ProposedBlock {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------

    /// Creates a new proposed block from the provided [`BlockInputs`], transaction batches and
    /// timestamp.
    ///
    /// This checks most of the constraints of a block and computes most of the data structure
    /// updates except for the more expensive tree updates (nullifier, account and chain
    /// commitment).
    ///
    /// # Errors
    ///
    /// Returns an error if any of the following conditions are met.
    ///
    /// ## Batches
    ///
    /// - The number of batches exceeds [`MAX_BATCHES_PER_BLOCK`].
    /// - There are duplicate batches, i.e. they have the same [`BatchId`].
    /// - The expiration block number of any batch is less than the block number of the currently
    ///   proposed block.
    ///
    /// ## Chain
    ///
    /// - The length of the [`PartialBlockchain`] in the block inputs is not equal to the previous
    ///   block header in the block inputs.
    /// - The [`PartialBlockchain`]'s chain commitment is not equal to the
    ///   [`BlockHeader::chain_commitment`] of the previous block header.
    ///
    /// ## Notes
    ///
    /// Note that, in the following, the set of authenticated notes includes unauthenticated notes
    /// that have been authenticated.
    ///
    /// - The union of all input notes across all batches contain duplicates.
    /// - The union of all output notes across all batches contain duplicates.
    /// - There is an unauthenticated input note and an output note with the same note ID but their
    ///   note commitments are different (i.e. their metadata is different).
    /// - There is a note inclusion proof for an unauthenticated note whose referenced block is not
    ///   in the [`PartialBlockchain`].
    /// - The note inclusion proof for an unauthenticated note is invalid.
    /// - There are any unauthenticated notes for which no note inclusion proof is provided.
    /// - A [`NullifierWitness`] is missing for an authenticated note.
    /// - If the [`NullifierWitness`] for an authenticated note proves that the note was already
    ///   consumed.
    ///
    /// ## Accounts
    ///
    /// - An [`AccountWitness`] is missing for an account updated by a batch.
    /// - Any two batches update the same account from the same state. For example, if batch 1
    ///   updates some account from state A to B and batch 2 updates it from A to F, then those
    ///   batches conflict as they both start from the same initial state but produce a fork in the
    ///   account's state.
    /// - Account updates from different batches cannot be brought in a contiguous order. For
    ///   example, if a batch 1 updates an account from state A to C, and a batch 2 updates it from
    ///   D to F, then the state transition from C to D is missing. Note that this does not mean,
    ///   that batches must be provided in an order where account updates chain together in the
    ///   order of the batches, which would generally be an impossible requirement to fulfill.
    /// - Account updates cannot be merged, i.e. if [`AccountUpdateDetails::merge`] fails on the
    ///   updates from two batches.
    ///
    /// ## Time
    ///
    /// - The given `timestamp` does not increase monotonically compared to the previous block
    ///   header's timestamp.
    pub fn new_at(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
        timestamp: u32,
    ) -> Result<Self, ProposedBlockError> {
        // Check for duplicate and max number of batches.
        // --------------------------------------------------------------------------------------------

        if batches.len() > MAX_BATCHES_PER_BLOCK {
            return Err(ProposedBlockError::TooManyBatches);
        }

        check_duplicate_batches(&batches)?;

        // Check timestamp increases monotonically.
        // --------------------------------------------------------------------------------------------

        check_timestamp_increases_monotonically(timestamp, block_inputs.prev_block_header())?;

        // Check for batch expiration.
        // --------------------------------------------------------------------------------------------

        check_batch_expiration(&batches, block_inputs.prev_block_header())?;

        // Check for consistency between the partial blockchain and the referenced previous block.
        // --------------------------------------------------------------------------------------------

        check_reference_block_partial_blockchain_consistency(
            block_inputs.partial_blockchain(),
            block_inputs.prev_block_header(),
        )?;

        // Check every block referenced by a batch is in the partial blockchain.
        // --------------------------------------------------------------------------------------------

        check_batch_reference_blocks(
            block_inputs.partial_blockchain(),
            block_inputs.prev_block_header(),
            &batches,
        )?;

        // Check for duplicates in the input and output notes and compute the input and output notes
        // of the block by erasing notes that are created and consumed within this block as well as
        // authenticating unauthenticated notes.
        // --------------------------------------------------------------------------------------------

        let (block_input_notes, block_erased_notes, block_output_notes) =
            InputOutputNoteTracker::from_batches(
                batches.iter(),
                block_inputs.unauthenticated_note_proofs(),
                block_inputs.partial_blockchain(),
                block_inputs.prev_block_header(),
            )?;

        // All unauthenticated notes must be erased or authenticated by now.
        if let Some(nullifier) = block_input_notes
            .iter()
            .find_map(|note| (!note.is_authenticated()).then_some(note.nullifier()))
        {
            return Err(ProposedBlockError::UnauthenticatedNoteConsumed { nullifier });
        }

        // Check for nullifiers proofs and unspent nullifiers.
        // --------------------------------------------------------------------------------------------

        let (prev_block_header, partial_blockchain, account_witnesses, mut nullifier_witnesses, _) =
            block_inputs.into_parts();

        // Remove nullifiers of erased notes, so we only add the nullifiers of actual input notes to
        // the proposed block.
        remove_erased_nullifiers(&mut nullifier_witnesses, block_erased_notes.into_iter());

        // Check against computed block_input_notes which also contain unauthenticated notes that
        // have been authenticated.
        check_nullifiers(
            &nullifier_witnesses,
            block_input_notes.iter().map(InputNoteCommitment::nullifier),
        )?;

        // Aggregate account updates across batches.
        // --------------------------------------------------------------------------------------------

        let aggregator = AccountUpdateAggregator::from_batches(&batches)?;
        let account_updated_witnesses = aggregator.into_update_witnesses(account_witnesses)?;

        // Compute the block's output note batches from the individual batch output notes.
        // --------------------------------------------------------------------------------------------

        let output_note_batches = compute_block_output_notes(&batches, block_output_notes);

        // Build proposed blocks from parts.
        // --------------------------------------------------------------------------------------------

        Ok(Self {
            batches: OrderedBatches::new(batches),
            timestamp,
            account_updated_witnesses,
            output_note_batches,
            created_nullifiers: nullifier_witnesses,
            partial_blockchain,
            prev_block_header,
        })
    }

    /// Creates a new proposed block from the provided [`BlockInputs`] and transaction batches.
    ///
    /// Equivalent to [`ProposedBlock::new_at`] except that the timestamp of the proposed block is
    /// set to the current system time or the previous block header's timestamp + 1, whichever
    /// is greater. This guarantees that the timestamp increases monotonically.
    ///
    /// See the [`ProposedBlock::new_at`] for details on errors and other constraints.
    #[cfg(feature = "std")]
    pub fn new(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
    ) -> Result<Self, ProposedBlockError> {
        let timestamp_now: u32 = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("now should be after 1970")
            .as_secs()
            .try_into()
            .expect("timestamp should fit in a u32 before the year 2106");

        // Take the max so the monotonicity check in `new_at` cannot fail due to clock skew.
        let timestamp = timestamp_now.max(block_inputs.prev_block_header().timestamp() + 1);

        Self::new_at(block_inputs, batches, timestamp)
    }

    // ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns an iterator over all transactions in the block.
    pub fn transactions(&self) -> impl Iterator<Item = &TransactionHeader> {
        self.batches
            .as_slice()
            .iter()
            .flat_map(|batch| batch.transactions().as_slice().iter())
    }

    /// Returns the block number of this proposed block.
    pub fn block_num(&self) -> BlockNumber {
        // The chain length is the length at the state of the previous block header, so we have to
        // add one.
        self.partial_blockchain().chain_length() + 1
    }

    /// Returns a reference to the slice of batches in this block.
    pub fn batches(&self) -> &OrderedBatches {
        &self.batches
    }

    /// Returns the map of nullifiers to their proofs from the proposed block.
    pub fn created_nullifiers(&self) -> &BTreeMap<Nullifier, NullifierWitness> {
        &self.created_nullifiers
    }

    /// Returns a reference to the previous block header that this block builds on top of.
    pub fn prev_block_header(&self) -> &BlockHeader {
        &self.prev_block_header
    }

    /// Returns the [`PartialBlockchain`] that this block contains.
    pub fn partial_blockchain(&self) -> &PartialBlockchain {
        &self.partial_blockchain
    }

    /// Returns a reference to the slice of accounts updated in this block.
    pub fn updated_accounts(&self) -> &[(AccountId, AccountUpdateWitness)] {
        &self.account_updated_witnesses
    }

    /// Returns the timestamp of this block.
    pub fn timestamp(&self) -> u32 {
        self.timestamp
    }

    /// Returns a slice of the [`OutputNoteBatch`] of each batch in this block.
    pub fn output_note_batches(&self) -> &[OutputNoteBatch] {
        &self.output_note_batches
    }

    // STATE MUTATORS
    // --------------------------------------------------------------------------------------------

    /// Consumes self and returns the non-[`Copy`] parts of the block.
    #[allow(clippy::type_complexity)]
    pub fn into_parts(
        self,
    ) -> (
        OrderedBatches,
        Vec<(AccountId, AccountUpdateWitness)>,
        Vec<OutputNoteBatch>,
        BTreeMap<Nullifier, NullifierWitness>,
        PartialBlockchain,
        BlockHeader,
    ) {
        (
            self.batches,
            self.account_updated_witnesses,
            self.output_note_batches,
            self.created_nullifiers,
            self.partial_blockchain,
            self.prev_block_header,
        )
    }
}
331
332// SERIALIZATION
333// ================================================================================================
334
impl Serializable for ProposedBlock {
    // Writes all fields in struct declaration order. This order is a wire-format contract and
    // must stay in sync with `Deserializable::read_from` below.
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        self.batches.write_into(target);
        self.timestamp.write_into(target);
        self.account_updated_witnesses.write_into(target);
        self.output_note_batches.write_into(target);
        self.created_nullifiers.write_into(target);
        self.partial_blockchain.write_into(target);
        self.prev_block_header.write_into(target);
    }
}
346
impl Deserializable for ProposedBlock {
    // Reads all fields in the exact order `Serializable::write_into` wrote them.
    //
    // NOTE(review): this reconstructs the struct without re-running the validation performed by
    // `ProposedBlock::new_at`; presumably the serialized bytes are trusted — confirm callers.
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        let block = Self {
            batches: OrderedBatches::read_from(source)?,
            timestamp: u32::read_from(source)?,
            account_updated_witnesses: <Vec<(AccountId, AccountUpdateWitness)>>::read_from(source)?,
            output_note_batches: <Vec<OutputNoteBatch>>::read_from(source)?,
            created_nullifiers: <BTreeMap<Nullifier, NullifierWitness>>::read_from(source)?,
            partial_blockchain: PartialBlockchain::read_from(source)?,
            prev_block_header: BlockHeader::read_from(source)?,
        };

        Ok(block)
    }
}
362// HELPER FUNCTIONS
363// ================================================================================================
364
365fn check_duplicate_batches(batches: &[ProvenBatch]) -> Result<(), ProposedBlockError> {
366    let mut input_note_set = BTreeSet::new();
367
368    for batch in batches {
369        if !input_note_set.insert(batch.id()) {
370            return Err(ProposedBlockError::DuplicateBatch { batch_id: batch.id() });
371        }
372    }
373
374    Ok(())
375}
376
377fn check_timestamp_increases_monotonically(
378    provided_timestamp: u32,
379    prev_block_header: &BlockHeader,
380) -> Result<(), ProposedBlockError> {
381    if provided_timestamp <= prev_block_header.timestamp() {
382        Err(ProposedBlockError::TimestampDoesNotIncreaseMonotonically {
383            provided_timestamp,
384            previous_timestamp: prev_block_header.timestamp(),
385        })
386    } else {
387        Ok(())
388    }
389}
390
391/// Checks whether any of the batches is expired and can no longer be included in this block.
392///
393/// To illustrate, a batch which expired at block 4 cannot be included in block 5, but if it
394/// expires at block 5 then it can still be included in block 5.
395fn check_batch_expiration(
396    batches: &[ProvenBatch],
397    prev_block_header: &BlockHeader,
398) -> Result<(), ProposedBlockError> {
399    let current_block_num = prev_block_header.block_num() + 1;
400
401    for batch in batches {
402        if batch.batch_expiration_block_num() < current_block_num {
403            return Err(ProposedBlockError::ExpiredBatch {
404                batch_id: batch.id(),
405                batch_expiration_block_num: batch.batch_expiration_block_num(),
406                current_block_num,
407            });
408        }
409    }
410
411    Ok(())
412}
413
414/// Check that each nullifier in the block has a proof provided and that the nullifier is
415/// unspent. The proofs are required to update the nullifier tree.
416fn check_nullifiers(
417    nullifier_witnesses: &BTreeMap<Nullifier, NullifierWitness>,
418    block_input_notes: impl Iterator<Item = Nullifier>,
419) -> Result<(), ProposedBlockError> {
420    for block_input_note in block_input_notes {
421        match nullifier_witnesses
422            .get(&block_input_note)
423            .and_then(|x| x.proof().get(&block_input_note.inner()))
424        {
425            Some(nullifier_value) => {
426                if nullifier_value != EMPTY_WORD {
427                    return Err(ProposedBlockError::NullifierSpent(block_input_note));
428                }
429            },
430            // If the nullifier witnesses did not contain a proof for this nullifier or the provided
431            // proof was not for this nullifier, then it's an error.
432            None => return Err(ProposedBlockError::NullifierProofMissing(block_input_note)),
433        }
434    }
435
436    Ok(())
437}
438
439/// Removes the nullifiers from the nullifier witnesses that were erased (i.e. created and consumed
440/// within the block).
441fn remove_erased_nullifiers(
442    nullifier_witnesses: &mut BTreeMap<Nullifier, NullifierWitness>,
443    block_erased_notes: impl Iterator<Item = Nullifier>,
444) {
445    for erased_note in block_erased_notes {
446        // We do not check that the nullifier was actually present to allow the block inputs to
447        // not include a nullifier that is known to belong to an erased note.
448        let _ = nullifier_witnesses.remove(&erased_note);
449    }
450}
451
452/// Checks consistency between the previous block header and the provided partial blockchain.
453///
454/// This checks that:
455/// - the chain length of the partial blockchain is equal to the block number of the previous block
456///   header, i.e. the partial blockchain's latest block is the previous' blocks reference block.
457///   The previous block header will be added to the partial blockchain as part of constructing the
458///   current block.
459/// - the root of the partial blockchain is equivalent to the chain commitment of the previous block
460///   header.
461fn check_reference_block_partial_blockchain_consistency(
462    partial_blockchain: &PartialBlockchain,
463    prev_block_header: &BlockHeader,
464) -> Result<(), ProposedBlockError> {
465    // Make sure that the current partial blockchain has blocks up to prev_block_header - 1, i.e.
466    // its chain length is equal to the block number of the previous block header.
467    if partial_blockchain.chain_length() != prev_block_header.block_num() {
468        return Err(ProposedBlockError::ChainLengthNotEqualToPreviousBlockNumber {
469            chain_length: partial_blockchain.chain_length(),
470            prev_block_num: prev_block_header.block_num(),
471        });
472    }
473
474    let chain_commitment = partial_blockchain.peaks().hash_peaks();
475    if chain_commitment != prev_block_header.chain_commitment() {
476        return Err(ProposedBlockError::ChainRootNotEqualToPreviousBlockChainCommitment {
477            chain_commitment,
478            prev_block_chain_commitment: prev_block_header.chain_commitment(),
479            prev_block_num: prev_block_header.block_num(),
480        });
481    }
482
483    Ok(())
484}
485
486/// Check that each block referenced by a batch in the block has an entry in the partial blockchain,
487/// except if the referenced block is the same as the previous block, referenced by the block.
488fn check_batch_reference_blocks(
489    partial_blockchain: &PartialBlockchain,
490    prev_block_header: &BlockHeader,
491    batches: &[ProvenBatch],
492) -> Result<(), ProposedBlockError> {
493    for batch in batches {
494        let batch_reference_block_num = batch.reference_block_num();
495        if batch_reference_block_num != prev_block_header.block_num()
496            && !partial_blockchain.contains_block(batch.reference_block_num())
497        {
498            return Err(ProposedBlockError::BatchReferenceBlockMissingFromChain {
499                reference_block_num: batch.reference_block_num(),
500                batch_id: batch.id(),
501            });
502        }
503    }
504
505    Ok(())
506}
507
508/// Computes the block's output notes from the batches of notes of each batch in the block.
509///
510/// We pass in `block_output_notes` which is the full set of output notes of the block, with output
511/// notes erased that are consumed by some batch in the block.
512///
513/// The batch output notes of each proven batch however contain all the notes that it creates,
514/// including ones that were potentially erased in `block_output_notes`. This means we have to
515/// make the batch output notes consistent with `block_output_notes` by removing the erased notes.
516/// Then it accurately represents what output notes the batch actually creates as part of the block.
517///
518/// Returns the set of [`OutputNoteBatch`]es that each batch creates.
519fn compute_block_output_notes(
520    batches: &[ProvenBatch],
521    mut block_output_notes: BTreeMap<NoteId, (BatchId, OutputNote)>,
522) -> Vec<OutputNoteBatch> {
523    let mut block_output_note_batches = Vec::with_capacity(batches.len());
524
525    for batch in batches.iter() {
526        let batch_output_notes = compute_batch_output_notes(batch, &mut block_output_notes);
527        block_output_note_batches.push(batch_output_notes);
528    }
529
530    block_output_note_batches
531}
532
533/// Computes the output note of the given batch. This is essentially the batch's output notes minus
534/// all erased notes.
535///
536/// If a note in the batch's output notes is not present in the block output notes map it means it
537/// was erased and should therefore not be added to the batch's output notes. If it is present, it
538/// is added to the set of output notes of this batch.
539///
540/// The output note set is returned.
541fn compute_batch_output_notes(
542    batch: &ProvenBatch,
543    block_output_notes: &mut BTreeMap<NoteId, (BatchId, OutputNote)>,
544) -> OutputNoteBatch {
545    // The len of the batch output notes is an upper bound of how many notes the batch could've
546    // produced so we reserve that much space to avoid reallocation.
547    let mut batch_output_notes = Vec::with_capacity(batch.output_notes().len());
548
549    for (note_idx, original_output_note) in batch.output_notes().iter().enumerate() {
550        // If block_output_notes no longer contains a note it means it was erased and we do not
551        // include it in the output notes of the current batch. We include the original index of the
552        // note in the batch so we can later correctly construct the block note tree. This index is
553        // needed because we want to be able to construct the block note tree in two ways: 1) By
554        // inserting the individual batch note trees (with erased notes removed) as subtrees into an
555        // empty block note tree or 2) by iterating the set `OutputNoteBatch`es. If we did not store
556        // the index, then the second method would assume a contiguous layout of output notes and
557        // result in a different tree than the first method.
558        //
559        // Note that because we disallow duplicate output notes, if this map contains the
560        // original note id, then we can be certain it was created by this batch and should stay
561        // in the tree. In other words, there is no ambiguity where a note originated from.
562        if let Some((_batch_id, output_note)) =
563            block_output_notes.remove(&original_output_note.id())
564        {
565            debug_assert_eq!(
566                _batch_id,
567                batch.id(),
568                "batch that contained the note originally is no longer the batch that contains it according to the provided map"
569            );
570            batch_output_notes.push((note_idx, output_note));
571        }
572    }
573
574    batch_output_notes
575}
576
577// ACCOUNT UPDATE AGGREGATOR
578// ================================================================================================
579
/// Helper for [`ProposedBlock::new_at`] that chains per-batch account updates into one
/// aggregated update per account.
struct AccountUpdateAggregator {
    /// The map from each account to the map of each of its updates, where the digest is the state
    /// commitment from which the contained update starts.
    /// An invariant of this field is that if the outer map has an entry for some account, the
    /// inner update map is guaranteed to not be empty as well.
    updates: BTreeMap<AccountId, BTreeMap<Digest, (BatchAccountUpdate, BatchId)>>,
}
587
impl AccountUpdateAggregator {
    /// Creates an empty aggregator.
    fn new() -> Self {
        Self { updates: BTreeMap::new() }
    }

    /// Aggregates all updates for the same account and stores each update indexed by its initial
    /// state commitment so we can easily retrieve them in the next step. This lets us
    /// chronologically order the updates per account across batches.
    fn from_batches(batches: &[ProvenBatch]) -> Result<Self, ProposedBlockError> {
        let mut update_aggregator = AccountUpdateAggregator::new();

        for batch in batches {
            for (account_id, update) in batch.account_updates() {
                update_aggregator.insert_update(*account_id, batch.id(), update.clone())?;
            }
        }

        Ok(update_aggregator)
    }

    /// Inserts the update from one batch for a specific account into the map of updates.
    ///
    /// Returns an error if another batch already registered an update for this account starting
    /// from the same initial state commitment, since that would fork the account's state.
    fn insert_update(
        &mut self,
        account_id: AccountId,
        batch_id: BatchId,
        update: BatchAccountUpdate,
    ) -> Result<(), ProposedBlockError> {
        // `insert` returns the previous value if the key existed, i.e. a conflicting update
        // starting from the same state commitment.
        if let Some((conflicting_update, conflicting_batch_id)) = self
            .updates
            .entry(account_id)
            .or_default()
            .insert(update.initial_state_commitment(), (update, batch_id))
        {
            return Err(ProposedBlockError::ConflictingBatchesUpdateSameAccount {
                account_id,
                initial_state_commitment: conflicting_update.initial_state_commitment(),
                first_batch_id: conflicting_batch_id,
                second_batch_id: batch_id,
            });
        }

        Ok(())
    }

    /// Consumes self and aggregates the account updates from all contained accounts.
    /// For each updated account an entry in `account_witnesses` must be present.
    ///
    /// Returns one `(AccountId, AccountUpdateWitness)` pair per updated account, or an error if
    /// a witness is missing or the per-account updates cannot be chained (see
    /// [`Self::aggregate_account`]).
    fn into_update_witnesses(
        self,
        mut account_witnesses: BTreeMap<AccountId, AccountWitness>,
    ) -> Result<Vec<(AccountId, AccountUpdateWitness)>, ProposedBlockError> {
        let mut account_update_witnesses = Vec::with_capacity(self.updates.len());

        for (account_id, updates_map) in self.updates {
            let witness = account_witnesses
                .remove(&account_id)
                .ok_or(ProposedBlockError::MissingAccountWitness(account_id))?;

            let account_update_witness = Self::aggregate_account(account_id, witness, updates_map)?;

            account_update_witnesses.push((account_id, account_update_witness));
        }

        Ok(account_update_witnesses)
    }

    /// Build the update for a single account from the provided map of updates, where each entry is
    /// the state from which the update starts. This chains updates for this account together in a
    /// chronological order using the state commitments to link them.
    fn aggregate_account(
        account_id: AccountId,
        initial_state_proof: AccountWitness,
        mut updates: BTreeMap<Digest, (BatchAccountUpdate, BatchId)>,
    ) -> Result<AccountUpdateWitness, ProposedBlockError> {
        // The account witness could prove inclusion of a different ID in which case the initial
        // state commitment of the current ID is the empty word.
        let initial_state_commitment = if account_id == initial_state_proof.id() {
            initial_state_proof.state_commitment()
        } else {
            Digest::from(EMPTY_WORD)
        };

        let mut details: Option<AccountUpdateDetails> = None;

        // Walk the chain of updates: each iteration removes the update that starts at the
        // current commitment and advances to that update's final commitment. If no update
        // starts at the current commitment while some remain, the chain has a gap.
        let mut current_commitment = initial_state_commitment;
        while !updates.is_empty() {
            let (update, _) = updates.remove(&current_commitment).ok_or_else(|| {
                ProposedBlockError::InconsistentAccountStateTransition {
                    account_id,
                    state_commitment: current_commitment,
                    remaining_state_commitments: updates.keys().copied().collect(),
                }
            })?;

            current_commitment = update.final_state_commitment();
            let update_details = update.into_update();

            // Merge each update's details into the running aggregate.
            details = Some(match details {
                None => update_details,
                Some(details) => details.merge(update_details).map_err(|source| {
                    ProposedBlockError::AccountUpdateError { account_id, source: Box::new(source) }
                })?,
            });
        }

        Ok(AccountUpdateWitness::new(
            initial_state_commitment,
            current_commitment,
            initial_state_proof,
            details.expect("details should be Some as updates is guaranteed to not be empty"),
        ))
    }
}
699}