miden_objects/block/
proposed_block.rs

1use alloc::{
2    collections::{BTreeMap, BTreeSet},
3    vec::Vec,
4};
5
6use crate::{
7    Digest, EMPTY_WORD, MAX_BATCHES_PER_BLOCK,
8    account::{AccountId, delta::AccountUpdateDetails},
9    batch::{BatchAccountUpdate, BatchId, InputOutputNoteTracker, ProvenBatch},
10    block::{
11        AccountUpdateWitness, AccountWitness, BlockHeader, BlockNumber, NullifierWitness,
12        OutputNoteBatch, block_inputs::BlockInputs,
13    },
14    errors::ProposedBlockError,
15    note::{NoteId, Nullifier},
16    transaction::{ChainMmr, InputNoteCommitment, OutputNote, TransactionId},
17    utils::serde::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
18};
19
20// PROPOSED BLOCK
21// =================================================================================================
22
/// A proposed block with many, but not all constraints of a
/// [`ProvenBlock`](crate::block::ProvenBlock) enforced.
///
/// The remaining constraints (the expensive tree updates) are enforced when proving the block.
///
/// See [`ProposedBlock::new_at`] for details on the checks.
#[derive(Debug, Clone)]
pub struct ProposedBlock {
    /// The transaction batches in this block.
    batches: Vec<ProvenBatch>,
    /// The unix timestamp of the block in seconds.
    timestamp: u32,
    /// All account's [`AccountUpdateWitness`] that were updated in this block. See its docs for
    /// details.
    account_updated_witnesses: Vec<(AccountId, AccountUpdateWitness)>,
    /// Note batches created by the transactions in this block.
    ///
    /// These are the output notes after note erasure has been done, so they represent the actual
    /// output notes of the block.
    ///
    /// The length of this vector is guaranteed to be equal to the length of `batches` and the
    /// inner batch of output notes may be empty if a batch did not create any notes.
    output_note_batches: Vec<OutputNoteBatch>,
    /// The nullifiers created by this block.
    ///
    /// These are the nullifiers of all input notes after note erasure has been done, so these are
    /// the nullifiers of all _authenticated_ notes consumed in the block.
    created_nullifiers: BTreeMap<Nullifier, NullifierWitness>,
    /// The [`ChainMmr`] at the state of the previous block header. It is used to:
    /// - authenticate unauthenticated notes whose note inclusion proof references a block.
    /// - authenticate all reference blocks of the batches in this block.
    chain_mmr: ChainMmr,
    /// The previous block's header which this block builds on top of.
    ///
    /// As part of proving the block, this header will be added to the next chain MMR.
    prev_block_header: BlockHeader,
}
58
impl ProposedBlock {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------

    /// Creates a new proposed block from the provided [`BlockInputs`], transaction batches and
    /// timestamp.
    ///
    /// This checks most of the constraints of a block and computes most of the data structure
    /// updates except for the more expensive tree updates (nullifier, account and chain
    /// commitment).
    ///
    /// # Errors
    ///
    /// Returns an error if any of the following conditions are met.
    ///
    /// ## Batches
    ///
    /// - The number of batches exceeds [`MAX_BATCHES_PER_BLOCK`].
    /// - There are duplicate batches, i.e. they have the same [`BatchId`].
    /// - The expiration block number of any batch is less than the block number of the currently
    ///   proposed block.
    ///
    /// ## Chain
    ///
    /// - The length of the [`ChainMmr`] in the block inputs is not equal to the block number of
    ///   the previous block header in the block inputs.
    /// - The [`ChainMmr`]'s chain commitment is not equal to the [`BlockHeader::chain_commitment`]
    ///   of the previous block header.
    ///
    /// ## Notes
    ///
    /// Note that, in the following, the set of authenticated notes includes unauthenticated notes
    /// that have been authenticated.
    ///
    /// - The union of all input notes across all batches contains duplicates.
    /// - The union of all output notes across all batches contains duplicates.
    /// - There is an unauthenticated input note and an output note with the same note ID but their
    ///   note commitments are different (i.e. their metadata is different).
    /// - There is a note inclusion proof for an unauthenticated note whose referenced block is not
    ///   in the [`ChainMmr`].
    /// - The note inclusion proof for an unauthenticated note is invalid.
    /// - There are any unauthenticated notes for which no note inclusion proof is provided.
    /// - A [`NullifierWitness`] is missing for an authenticated note.
    /// - If the [`NullifierWitness`] for an authenticated note proves that the note was already
    ///   consumed.
    ///
    /// ## Accounts
    ///
    /// - An [`AccountWitness`] is missing for an account updated by a batch.
    /// - Any two batches update the same account from the same state. For example, if batch 1
    ///   updates some account from state A to B and batch 2 updates it from A to F, then those
    ///   batches conflict as they both start from the same initial state but produce a fork in the
    ///   account's state.
    /// - Account updates from different batches cannot be brought in a contiguous order. For
    ///   example, if a batch 1 updates an account from state A to C, and a batch 2 updates it from
    ///   D to F, then the state transition from C to D is missing. Note that this does not mean,
    ///   that batches must be provided in an order where account updates chain together in the
    ///   order of the batches, which would generally be an impossible requirement to fulfill.
    /// - Account updates cannot be merged, i.e. if [`AccountUpdateDetails::merge`] fails on the
    ///   updates from two batches.
    ///
    /// ## Time
    ///
    /// - The given `timestamp` does not increase monotonically compared to the previous block
    ///   header's timestamp.
    pub fn new_at(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
        timestamp: u32,
    ) -> Result<Self, ProposedBlockError> {
        // Check for duplicate and max number of batches.
        // --------------------------------------------------------------------------------------------

        if batches.len() > MAX_BATCHES_PER_BLOCK {
            return Err(ProposedBlockError::TooManyBatches);
        }

        check_duplicate_batches(&batches)?;

        // Check timestamp increases monotonically.
        // --------------------------------------------------------------------------------------------

        check_timestamp_increases_monotonically(timestamp, block_inputs.prev_block_header())?;

        // Check for batch expiration.
        // --------------------------------------------------------------------------------------------

        check_batch_expiration(&batches, block_inputs.prev_block_header())?;

        // Check for consistency between the chain MMR and the referenced previous block.
        // --------------------------------------------------------------------------------------------

        check_reference_block_chain_mmr_consistency(
            block_inputs.chain_mmr(),
            block_inputs.prev_block_header(),
        )?;

        // Check every block referenced by a batch is in the chain MMR.
        // --------------------------------------------------------------------------------------------

        check_batch_reference_blocks(
            block_inputs.chain_mmr(),
            block_inputs.prev_block_header(),
            &batches,
        )?;

        // Check for duplicates in the input and output notes and compute the input and output notes
        // of the block by erasing notes that are created and consumed within this block as well as
        // authenticating unauthenticated notes.
        // --------------------------------------------------------------------------------------------

        let (block_input_notes, block_erased_notes, block_output_notes) =
            InputOutputNoteTracker::from_batches(
                batches.iter(),
                block_inputs.unauthenticated_note_proofs(),
                block_inputs.chain_mmr(),
                block_inputs.prev_block_header(),
            )?;

        // All unauthenticated notes must be erased or authenticated by now.
        if let Some(nullifier) = block_input_notes
            .iter()
            .find_map(|note| (!note.is_authenticated()).then_some(note.nullifier()))
        {
            return Err(ProposedBlockError::UnauthenticatedNoteConsumed { nullifier });
        }

        // Check for nullifiers proofs and unspent nullifiers.
        // --------------------------------------------------------------------------------------------

        let (prev_block_header, chain_mmr, account_witnesses, mut nullifier_witnesses, _) =
            block_inputs.into_parts();

        // Remove nullifiers of erased notes, so we only add the nullifiers of actual input notes to
        // the proposed block.
        remove_erased_nullifiers(&mut nullifier_witnesses, block_erased_notes.into_iter());

        // Check against computed block_input_notes which also contain unauthenticated notes that
        // have been authenticated.
        check_nullifiers(
            &nullifier_witnesses,
            block_input_notes.iter().map(InputNoteCommitment::nullifier),
        )?;

        // Aggregate account updates across batches.
        // --------------------------------------------------------------------------------------------

        let aggregator = AccountUpdateAggregator::from_batches(&batches)?;
        let account_updated_witnesses = aggregator.into_update_witnesses(account_witnesses)?;

        // Compute the block's output note batches from the individual batch output notes.
        // --------------------------------------------------------------------------------------------

        let output_note_batches = compute_block_output_notes(&batches, block_output_notes);

        // Build proposed blocks from parts.
        // --------------------------------------------------------------------------------------------

        Ok(Self {
            batches,
            timestamp,
            account_updated_witnesses,
            output_note_batches,
            created_nullifiers: nullifier_witnesses,
            chain_mmr,
            prev_block_header,
        })
    }

    /// Creates a new proposed block from the provided [`BlockInputs`] and transaction batches.
    ///
    /// Equivalent to [`ProposedBlock::new_at`] except that the timestamp of the proposed block is
    /// set to the current system time or the previous block header's timestamp + 1, whichever
    /// is greater. This guarantees that the timestamp increases monotonically.
    ///
    /// See the [`ProposedBlock::new_at`] for details on errors and other constraints.
    #[cfg(feature = "std")]
    pub fn new(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
    ) -> Result<Self, ProposedBlockError> {
        let timestamp_now: u32 = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("now should be after 1970")
            .as_secs()
            .try_into()
            .expect("timestamp should fit in a u32 before the year 2106");

        // NOTE(review): `timestamp() + 1` would overflow if the previous timestamp were
        // u32::MAX; the expect above already assumes timestamps stay below that bound.
        let timestamp = timestamp_now.max(block_inputs.prev_block_header().timestamp() + 1);

        Self::new_at(block_inputs, batches, timestamp)
    }

    // ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns an iterator over all transactions which affected accounts in the block with
    /// corresponding account IDs.
    pub fn affected_accounts(&self) -> impl Iterator<Item = (TransactionId, AccountId)> + '_ {
        self.account_updated_witnesses.iter().flat_map(|(account_id, update)| {
            update.transactions().iter().map(move |tx_id| (*tx_id, *account_id))
        })
    }

    /// Returns the block number of this proposed block.
    pub fn block_num(&self) -> BlockNumber {
        // The chain length is the length at the state of the previous block header, so we have to
        // add one.
        self.chain_mmr().chain_length() + 1
    }

    /// Returns a reference to the slice of batches in this block.
    pub fn batches(&self) -> &[ProvenBatch] {
        &self.batches
    }

    /// Returns the map of nullifiers to their proofs from the proposed block.
    pub fn created_nullifiers(&self) -> &BTreeMap<Nullifier, NullifierWitness> {
        &self.created_nullifiers
    }

    /// Returns a reference to the previous block header that this block builds on top of.
    pub fn prev_block_header(&self) -> &BlockHeader {
        &self.prev_block_header
    }

    /// Returns the [`ChainMmr`] that this block contains.
    pub fn chain_mmr(&self) -> &ChainMmr {
        &self.chain_mmr
    }

    /// Returns a reference to the slice of accounts updated in this block.
    pub fn updated_accounts(&self) -> &[(AccountId, AccountUpdateWitness)] {
        &self.account_updated_witnesses
    }

    /// Returns the timestamp of this block.
    pub fn timestamp(&self) -> u32 {
        self.timestamp
    }

    /// Returns a slice of the [`OutputNoteBatch`] of each batch in this block.
    pub fn output_note_batches(&self) -> &[OutputNoteBatch] {
        &self.output_note_batches
    }

    // STATE MUTATORS
    // --------------------------------------------------------------------------------------------

    /// Consumes self and returns the non-[`Copy`] parts of the block.
    #[allow(clippy::type_complexity)]
    pub fn into_parts(
        self,
    ) -> (
        Vec<ProvenBatch>,
        Vec<(AccountId, AccountUpdateWitness)>,
        Vec<OutputNoteBatch>,
        BTreeMap<Nullifier, NullifierWitness>,
        ChainMmr,
        BlockHeader,
    ) {
        (
            self.batches,
            self.account_updated_witnesses,
            self.output_note_batches,
            self.created_nullifiers,
            self.chain_mmr,
            self.prev_block_header,
        )
    }
}
330
331// SERIALIZATION
332// ================================================================================================
333
impl Serializable for ProposedBlock {
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        // The field order here is the wire format; it must match the read order in the
        // `Deserializable` impl below.
        self.batches.write_into(target);
        self.timestamp.write_into(target);
        self.account_updated_witnesses.write_into(target);
        self.output_note_batches.write_into(target);
        self.created_nullifiers.write_into(target);
        self.chain_mmr.write_into(target);
        self.prev_block_header.write_into(target);
    }
}
345
impl Deserializable for ProposedBlock {
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        // Fields are read in the exact order they are written in the `Serializable` impl.
        //
        // NOTE(review): deserialization does not re-run the validation performed by
        // `ProposedBlock::new_at`; presumably callers only deserialize data produced by a
        // trusted serializer — confirm before accepting untrusted bytes here.
        let block = Self {
            batches: <Vec<ProvenBatch>>::read_from(source)?,
            timestamp: u32::read_from(source)?,
            account_updated_witnesses: <Vec<(AccountId, AccountUpdateWitness)>>::read_from(source)?,
            output_note_batches: <Vec<OutputNoteBatch>>::read_from(source)?,
            created_nullifiers: <BTreeMap<Nullifier, NullifierWitness>>::read_from(source)?,
            chain_mmr: ChainMmr::read_from(source)?,
            prev_block_header: BlockHeader::read_from(source)?,
        };

        Ok(block)
    }
}
361// HELPER FUNCTIONS
362// ================================================================================================
363
364fn check_duplicate_batches(batches: &[ProvenBatch]) -> Result<(), ProposedBlockError> {
365    let mut input_note_set = BTreeSet::new();
366
367    for batch in batches {
368        if !input_note_set.insert(batch.id()) {
369            return Err(ProposedBlockError::DuplicateBatch { batch_id: batch.id() });
370        }
371    }
372
373    Ok(())
374}
375
376fn check_timestamp_increases_monotonically(
377    provided_timestamp: u32,
378    prev_block_header: &BlockHeader,
379) -> Result<(), ProposedBlockError> {
380    if provided_timestamp <= prev_block_header.timestamp() {
381        Err(ProposedBlockError::TimestampDoesNotIncreaseMonotonically {
382            provided_timestamp,
383            previous_timestamp: prev_block_header.timestamp(),
384        })
385    } else {
386        Ok(())
387    }
388}
389
390/// Checks whether any of the batches is expired and can no longer be included in this block.
391///
392/// To illustrate, a batch which expired at block 4 cannot be included in block 5, but if it
393/// expires at block 5 then it can still be included in block 5.
394fn check_batch_expiration(
395    batches: &[ProvenBatch],
396    prev_block_header: &BlockHeader,
397) -> Result<(), ProposedBlockError> {
398    let current_block_num = prev_block_header.block_num() + 1;
399
400    for batch in batches {
401        if batch.batch_expiration_block_num() < current_block_num {
402            return Err(ProposedBlockError::ExpiredBatch {
403                batch_id: batch.id(),
404                batch_expiration_block_num: batch.batch_expiration_block_num(),
405                current_block_num,
406            });
407        }
408    }
409
410    Ok(())
411}
412
413/// Check that each nullifier in the block has a proof provided and that the nullifier is
414/// unspent. The proofs are required to update the nullifier tree.
415fn check_nullifiers(
416    nullifier_witnesses: &BTreeMap<Nullifier, NullifierWitness>,
417    block_input_notes: impl Iterator<Item = Nullifier>,
418) -> Result<(), ProposedBlockError> {
419    for block_input_note in block_input_notes {
420        match nullifier_witnesses
421            .get(&block_input_note)
422            .and_then(|x| x.proof().get(&block_input_note.inner()))
423        {
424            Some(nullifier_value) => {
425                if nullifier_value != EMPTY_WORD {
426                    return Err(ProposedBlockError::NullifierSpent(block_input_note));
427                }
428            },
429            // If the nullifier witnesses did not contain a proof for this nullifier or the provided
430            // proof was not for this nullifier, then it's an error.
431            None => return Err(ProposedBlockError::NullifierProofMissing(block_input_note)),
432        }
433    }
434
435    Ok(())
436}
437
438/// Removes the nullifiers from the nullifier witnesses that were erased (i.e. created and consumed
439/// within the block).
440fn remove_erased_nullifiers(
441    nullifier_witnesses: &mut BTreeMap<Nullifier, NullifierWitness>,
442    block_erased_notes: impl Iterator<Item = Nullifier>,
443) {
444    for erased_note in block_erased_notes {
445        // We do not check that the nullifier was actually present to allow the block inputs to
446        // not include a nullifier that is known to belong to an erased note.
447        let _ = nullifier_witnesses.remove(&erased_note);
448    }
449}
450
451/// Checks consistency between the previous block header and the provided chain MMR.
452///
453/// This checks that:
454/// - the chain length of the chain MMR is equal to the block number of the previous block header,
455///   i.e. the chain MMR's latest block is the previous' blocks reference block. The previous block
456///   header will be added to the chain MMR as part of constructing the current block.
457/// - the root of the chain MMR is equivalent to the chain commitment of the previous block header.
458fn check_reference_block_chain_mmr_consistency(
459    chain_mmr: &ChainMmr,
460    prev_block_header: &BlockHeader,
461) -> Result<(), ProposedBlockError> {
462    // Make sure that the current chain MMR has blocks up to prev_block_header - 1, i.e. its
463    // chain length is equal to the block number of the previous block header.
464    if chain_mmr.chain_length() != prev_block_header.block_num() {
465        return Err(ProposedBlockError::ChainLengthNotEqualToPreviousBlockNumber {
466            chain_length: chain_mmr.chain_length(),
467            prev_block_num: prev_block_header.block_num(),
468        });
469    }
470
471    let chain_commitment = chain_mmr.peaks().hash_peaks();
472    if chain_commitment != prev_block_header.chain_commitment() {
473        return Err(ProposedBlockError::ChainRootNotEqualToPreviousBlockChainCommitment {
474            chain_commitment,
475            prev_block_chain_commitment: prev_block_header.chain_commitment(),
476            prev_block_num: prev_block_header.block_num(),
477        });
478    }
479
480    Ok(())
481}
482
483/// Check that each block referenced by a batch in the block has an entry in the chain MMR,
484/// except if the referenced block is the same as the previous block, referenced by the block.
485fn check_batch_reference_blocks(
486    chain_mmr: &ChainMmr,
487    prev_block_header: &BlockHeader,
488    batches: &[ProvenBatch],
489) -> Result<(), ProposedBlockError> {
490    for batch in batches {
491        let batch_reference_block_num = batch.reference_block_num();
492        if batch_reference_block_num != prev_block_header.block_num()
493            && !chain_mmr.contains_block(batch.reference_block_num())
494        {
495            return Err(ProposedBlockError::BatchReferenceBlockMissingFromChain {
496                reference_block_num: batch.reference_block_num(),
497                batch_id: batch.id(),
498            });
499        }
500    }
501
502    Ok(())
503}
504
505/// Computes the block's output notes from the batches of notes of each batch in the block.
506///
507/// We pass in `block_output_notes` which is the full set of output notes of the block, with output
508/// notes erased that are consumed by some batch in the block.
509///
510/// The batch output notes of each proven batch however contain all the notes that it creates,
511/// including ones that were potentially erased in `block_output_notes`. This means we have to
512/// make the batch output notes consistent with `block_output_notes` by removing the erased notes.
513/// Then it accurately represents what output notes the batch actually creates as part of the block.
514///
515/// Returns the set of [`OutputNoteBatch`]es that each batch creates.
516fn compute_block_output_notes(
517    batches: &[ProvenBatch],
518    mut block_output_notes: BTreeMap<NoteId, (BatchId, OutputNote)>,
519) -> Vec<OutputNoteBatch> {
520    let mut block_output_note_batches = Vec::with_capacity(batches.len());
521
522    for batch in batches.iter() {
523        let batch_output_notes = compute_batch_output_notes(batch, &mut block_output_notes);
524        block_output_note_batches.push(batch_output_notes);
525    }
526
527    block_output_note_batches
528}
529
530/// Computes the output note of the given batch. This is essentially the batch's output notes minus
531/// all erased notes.
532///
533/// If a note in the batch's output notes is not present in the block output notes map it means it
534/// was erased and should therefore not be added to the batch's output notes. If it is present, it
535/// is added to the set of output notes of this batch.
536///
537/// The output note set is returned.
538fn compute_batch_output_notes(
539    batch: &ProvenBatch,
540    block_output_notes: &mut BTreeMap<NoteId, (BatchId, OutputNote)>,
541) -> OutputNoteBatch {
542    // The len of the batch output notes is an upper bound of how many notes the batch could've
543    // produced so we reserve that much space to avoid reallocation.
544    let mut batch_output_notes = Vec::with_capacity(batch.output_notes().len());
545
546    for (note_idx, original_output_note) in batch.output_notes().iter().enumerate() {
547        // If block_output_notes no longer contains a note it means it was erased and we do not
548        // include it in the output notes of the current batch. We include the original index of the
549        // note in the batch so we can later correctly construct the block note tree. This index is
550        // needed because we want to be able to construct the block note tree in two ways: 1) By
551        // inserting the individual batch note trees (with erased notes removed) as subtrees into an
552        // empty block note tree or 2) by iterating the set `OutputNoteBatch`es. If we did not store
553        // the index, then the second method would assume a contiguous layout of output notes and
554        // result in a different tree than the first method.
555        //
556        // Note that because we disallow duplicate output notes, if this map contains the
557        // original note id, then we can be certain it was created by this batch and should stay
558        // in the tree. In other words, there is no ambiguity where a note originated from.
559        if let Some((_batch_id, output_note)) =
560            block_output_notes.remove(&original_output_note.id())
561        {
562            debug_assert_eq!(
563                _batch_id,
564                batch.id(),
565                "batch that contained the note originally is no longer the batch that contains it according to the provided map"
566            );
567            batch_output_notes.push((note_idx, output_note));
568        }
569    }
570
571    batch_output_notes
572}
573
574// ACCOUNT UPDATE AGGREGATOR
575// ================================================================================================
576
/// Aggregates the per-batch account updates of a block into one chained update per account.
struct AccountUpdateAggregator {
    /// The map from each account to the map of each of its updates, where the digest is the state
    /// commitment from which the contained update starts.
    /// An invariant of this field is that if the outer map has an entry for some account, the
    /// inner update map is guaranteed to not be empty as well.
    updates: BTreeMap<AccountId, BTreeMap<Digest, (BatchAccountUpdate, BatchId)>>,
}
584
impl AccountUpdateAggregator {
    /// Creates an empty aggregator.
    fn new() -> Self {
        Self { updates: BTreeMap::new() }
    }

    /// Aggregates all updates for the same account and stores each update indexed by its initial
    /// state commitment so we can easily retrieve them in the next step. This lets us
    /// chronologically order the updates per account across batches.
    fn from_batches(batches: &[ProvenBatch]) -> Result<Self, ProposedBlockError> {
        let mut update_aggregator = AccountUpdateAggregator::new();

        for batch in batches {
            for (account_id, update) in batch.account_updates() {
                update_aggregator.insert_update(*account_id, batch.id(), update.clone())?;
            }
        }

        Ok(update_aggregator)
    }

    /// Inserts the update from one batch for a specific account into the map of updates.
    ///
    /// # Errors
    ///
    /// Returns an error if another batch already inserted an update for this account starting
    /// from the same initial state commitment, since two updates from the same state would fork
    /// the account's state.
    fn insert_update(
        &mut self,
        account_id: AccountId,
        batch_id: BatchId,
        update: BatchAccountUpdate,
    ) -> Result<(), ProposedBlockError> {
        // `insert` returns the previous entry if one existed, which is exactly the conflicting
        // update from another batch.
        if let Some((conflicting_update, conflicting_batch_id)) = self
            .updates
            .entry(account_id)
            .or_default()
            .insert(update.initial_state_commitment(), (update, batch_id))
        {
            return Err(ProposedBlockError::ConflictingBatchesUpdateSameAccount {
                account_id,
                initial_state_commitment: conflicting_update.initial_state_commitment(),
                first_batch_id: conflicting_batch_id,
                second_batch_id: batch_id,
            });
        }

        Ok(())
    }

    /// Consumes self and aggregates the account updates from all contained accounts.
    /// For each updated account an entry in `account_witnesses` must be present.
    fn into_update_witnesses(
        self,
        mut account_witnesses: BTreeMap<AccountId, AccountWitness>,
    ) -> Result<Vec<(AccountId, AccountUpdateWitness)>, ProposedBlockError> {
        let mut account_update_witnesses = Vec::with_capacity(self.updates.len());

        for (account_id, updates_map) in self.updates {
            // Each updated account must come with a witness; removing it also ensures a witness
            // is consumed at most once.
            let witness = account_witnesses
                .remove(&account_id)
                .ok_or(ProposedBlockError::MissingAccountWitness(account_id))?;

            let account_update_witness = Self::aggregate_account(account_id, witness, updates_map)?;

            account_update_witnesses.push((account_id, account_update_witness));
        }

        Ok(account_update_witnesses)
    }

    /// Build the update for a single account from the provided map of updates, where each entry is
    /// the state from which the update starts. This chains updates for this account together in a
    /// chronological order using the state commitments to link them.
    ///
    /// # Panics
    ///
    /// Panics if `updates` is empty; callers uphold the invariant that an account only has an
    /// entry in the aggregator if at least one update exists for it.
    fn aggregate_account(
        account_id: AccountId,
        witness: AccountWitness,
        mut updates: BTreeMap<Digest, (BatchAccountUpdate, BatchId)>,
    ) -> Result<AccountUpdateWitness, ProposedBlockError> {
        let (initial_state_commitment, initial_state_proof) = witness.into_parts();
        let mut details: Option<AccountUpdateDetails> = None;

        let mut transactions = Vec::new();
        // Walk the chain of updates: starting from the witnessed initial state, repeatedly pick
        // the update whose initial commitment matches the current state. If no update starts at
        // the current state while updates remain, the chain has a gap and is inconsistent.
        let mut current_commitment = initial_state_commitment;
        while !updates.is_empty() {
            let (update, _) = updates.remove(&current_commitment).ok_or_else(|| {
                ProposedBlockError::InconsistentAccountStateTransition {
                    account_id,
                    state_commitment: current_commitment,
                    remaining_state_commitments: updates.keys().copied().collect(),
                }
            })?;

            current_commitment = update.final_state_commitment();
            let (update_transactions, update_details) = update.into_parts();
            transactions.extend(update_transactions);

            // Merge this update's details into the accumulated details of prior updates.
            details = Some(match details {
                None => update_details,
                Some(details) => details.merge(update_details).map_err(|source| {
                    ProposedBlockError::AccountUpdateError { account_id, source }
                })?,
            });
        }

        Ok(AccountUpdateWitness::new(
            initial_state_commitment,
            current_commitment,
            initial_state_proof,
            details.expect("details should be Some as updates is guaranteed to not be empty"),
            transactions,
        ))
    }
}