// miden_objects/block/proposed_block.rs

1use alloc::boxed::Box;
2use alloc::collections::{BTreeMap, BTreeSet};
3use alloc::vec::Vec;
4
5use crate::account::AccountId;
6use crate::account::delta::AccountUpdateDetails;
7use crate::batch::{
8    BatchAccountUpdate,
9    BatchId,
10    InputOutputNoteTracker,
11    OrderedBatches,
12    ProvenBatch,
13};
14use crate::block::block_inputs::BlockInputs;
15use crate::block::{
16    AccountUpdateWitness,
17    AccountWitness,
18    BlockHeader,
19    BlockNumber,
20    NullifierWitness,
21    OutputNoteBatch,
22};
23use crate::errors::ProposedBlockError;
24use crate::note::{NoteId, Nullifier};
25use crate::transaction::{InputNoteCommitment, OutputNote, PartialBlockchain, TransactionHeader};
26use crate::utils::serde::{
27    ByteReader,
28    ByteWriter,
29    Deserializable,
30    DeserializationError,
31    Serializable,
32};
33use crate::{EMPTY_WORD, MAX_BATCHES_PER_BLOCK, Word};
34
35// PROPOSED BLOCK
36// =================================================================================================
37
/// A proposed block with many, but not all constraints of a
/// [`ProvenBlock`](crate::block::ProvenBlock) enforced.
///
/// A proposed block is the input to the block prover, which enforces the remaining constraints
/// (the nullifier, account and chain tree updates).
///
/// See [`ProposedBlock::new_at`] for details on the checks.
#[derive(Debug, Clone)]
pub struct ProposedBlock {
    /// The transaction batches in this block.
    batches: OrderedBatches,
    /// The unix timestamp of the block in seconds.
    timestamp: u32,
    /// The [`AccountUpdateWitness`] for each account that was updated in this block. See its docs
    /// for details.
    account_updated_witnesses: Vec<(AccountId, AccountUpdateWitness)>,
    /// Note batches created by the transactions in this block.
    ///
    /// These are the output notes after note erasure has been done, so they represent the actual
    /// output notes of the block.
    ///
    /// The length of this vector is guaranteed to be equal to the length of `batches` and the
    /// inner batch of output notes may be empty if a batch did not create any notes.
    output_note_batches: Vec<OutputNoteBatch>,
    /// The nullifiers created by this block.
    ///
    /// These are the nullifiers of all input notes after note erasure has been done, so these are
    /// the nullifiers of all _authenticated_ notes consumed in the block.
    created_nullifiers: BTreeMap<Nullifier, NullifierWitness>,
    /// The [`PartialBlockchain`] at the state of the previous block header. It is used to:
    /// - authenticate unauthenticated notes whose note inclusion proof references a block.
    /// - authenticate all reference blocks of the batches in this block.
    partial_blockchain: PartialBlockchain,
    /// The previous block's header which this block builds on top of.
    ///
    /// As part of proving the block, this header will be added to the next partial blockchain.
    prev_block_header: BlockHeader,
}
73
impl ProposedBlock {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------

    /// Creates a new proposed block from the provided [`BlockInputs`], transaction batches and
    /// timestamp.
    ///
    /// This checks most of the constraints of a block and computes most of the data structure
    /// updates except for the more expensive tree updates (nullifier, account and chain
    /// commitment).
    ///
    /// # Errors
    ///
    /// Returns an error if any of the following conditions are met.
    ///
    /// ## Batches
    ///
    /// - The number of batches exceeds [`MAX_BATCHES_PER_BLOCK`].
    /// - There are duplicate batches, i.e. they have the same [`BatchId`].
    /// - The expiration block number of any batch is less than the block number of the currently
    ///   proposed block.
    ///
    /// ## Chain
    ///
    /// - The length of the [`PartialBlockchain`] in the block inputs is not equal to the block
    ///   number of the previous block header in the block inputs.
    /// - The [`PartialBlockchain`]'s chain commitment is not equal to the
    ///   [`BlockHeader::chain_commitment`] of the previous block header.
    ///
    /// ## Notes
    ///
    /// Note that, in the following, the set of authenticated notes includes unauthenticated notes
    /// that have been authenticated.
    ///
    /// - The union of all input notes across all batches contain duplicates.
    /// - The union of all output notes across all batches contain duplicates.
    /// - There is an unauthenticated input note and an output note with the same note ID but their
    ///   note commitments are different (i.e. their metadata is different).
    /// - There is a note inclusion proof for an unauthenticated note whose referenced block is not
    ///   in the [`PartialBlockchain`].
    /// - The note inclusion proof for an unauthenticated note is invalid.
    /// - There are any unauthenticated notes for which no note inclusion proof is provided.
    /// - A [`NullifierWitness`] is missing for an authenticated note.
    /// - If the [`NullifierWitness`] for an authenticated note proves that the note was already
    ///   consumed.
    ///
    /// ## Accounts
    ///
    /// - An [`AccountWitness`] is missing for an account updated by a batch.
    /// - Any two batches update the same account from the same state. For example, if batch 1
    ///   updates some account from state A to B and batch 2 updates it from A to F, then those
    ///   batches conflict as they both start from the same initial state but produce a fork in the
    ///   account's state.
    /// - Account updates from different batches cannot be brought in a contiguous order. For
    ///   example, if a batch 1 updates an account from state A to C, and a batch 2 updates it from
    ///   D to F, then the state transition from C to D is missing. Note that this does not mean,
    ///   that batches must be provided in an order where account updates chain together in the
    ///   order of the batches, which would generally be an impossible requirement to fulfill.
    /// - Account updates cannot be merged, i.e. if [`AccountUpdateDetails::merge`] fails on the
    ///   updates from two batches.
    ///
    /// ## Time
    ///
    /// - The given `timestamp` does not increase monotonically compared to the previous block
    ///   header's timestamp.
    pub fn new_at(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
        timestamp: u32,
    ) -> Result<Self, ProposedBlockError> {
        // Check for duplicate and max number of batches.
        // --------------------------------------------------------------------------------------------

        if batches.len() > MAX_BATCHES_PER_BLOCK {
            return Err(ProposedBlockError::TooManyBatches);
        }

        check_duplicate_batches(&batches)?;

        // Check timestamp increases monotonically.
        // --------------------------------------------------------------------------------------------

        check_timestamp_increases_monotonically(timestamp, block_inputs.prev_block_header())?;

        // Check for batch expiration.
        // --------------------------------------------------------------------------------------------

        check_batch_expiration(&batches, block_inputs.prev_block_header())?;

        // Check for consistency between the partial blockchain and the referenced previous block.
        // --------------------------------------------------------------------------------------------

        check_reference_block_partial_blockchain_consistency(
            block_inputs.partial_blockchain(),
            block_inputs.prev_block_header(),
        )?;

        // Check every block referenced by a batch is in the partial blockchain.
        // --------------------------------------------------------------------------------------------

        check_batch_reference_blocks(
            block_inputs.partial_blockchain(),
            block_inputs.prev_block_header(),
            &batches,
        )?;

        // Check for duplicates in the input and output notes and compute the input and output notes
        // of the block by erasing notes that are created and consumed within this block as well as
        // authenticating unauthenticated notes.
        // --------------------------------------------------------------------------------------------

        let (block_input_notes, block_erased_notes, block_output_notes) =
            InputOutputNoteTracker::from_batches(
                batches.iter(),
                block_inputs.unauthenticated_note_proofs(),
                block_inputs.partial_blockchain(),
                block_inputs.prev_block_header(),
            )?;

        // All unauthenticated notes must be erased or authenticated by now.
        if let Some(nullifier) = block_input_notes
            .iter()
            .find_map(|note| (!note.is_authenticated()).then_some(note.nullifier()))
        {
            return Err(ProposedBlockError::UnauthenticatedNoteConsumed { nullifier });
        }

        // Check for nullifiers proofs and unspent nullifiers.
        // --------------------------------------------------------------------------------------------

        let (prev_block_header, partial_blockchain, account_witnesses, mut nullifier_witnesses, _) =
            block_inputs.into_parts();

        // Remove nullifiers of erased notes, so we only add the nullifiers of actual input notes to
        // the proposed block.
        remove_erased_nullifiers(&mut nullifier_witnesses, block_erased_notes.into_iter());

        // Check against computed block_input_notes which also contain unauthenticated notes that
        // have been authenticated.
        check_nullifiers(
            &nullifier_witnesses,
            block_input_notes.iter().map(InputNoteCommitment::nullifier),
        )?;

        // Aggregate account updates across batches.
        // --------------------------------------------------------------------------------------------

        let aggregator = AccountUpdateAggregator::from_batches(&batches)?;
        let account_updated_witnesses = aggregator.into_update_witnesses(account_witnesses)?;

        // Compute the block's output note batches from the individual batch output notes.
        // --------------------------------------------------------------------------------------------

        let output_note_batches = compute_block_output_notes(&batches, block_output_notes);

        // Build proposed blocks from parts.
        // --------------------------------------------------------------------------------------------

        Ok(Self {
            batches: OrderedBatches::new(batches),
            timestamp,
            account_updated_witnesses,
            output_note_batches,
            created_nullifiers: nullifier_witnesses,
            partial_blockchain,
            prev_block_header,
        })
    }

    /// Creates a new proposed block from the provided [`BlockInputs`] and transaction batches.
    ///
    /// Equivalent to [`ProposedBlock::new_at`] except that the timestamp of the proposed block is
    /// set to the current system time or the previous block header's timestamp + 1, whichever
    /// is greater. This guarantees that the timestamp increases monotonically.
    ///
    /// See the [`ProposedBlock::new_at`] for details on errors and other constraints.
    #[cfg(feature = "std")]
    pub fn new(
        block_inputs: BlockInputs,
        batches: Vec<ProvenBatch>,
    ) -> Result<Self, ProposedBlockError> {
        let timestamp_now: u32 = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("now should be after 1970")
            .as_secs()
            .try_into()
            .expect("timestamp should fit in a u32 before the year 2106");

        let timestamp = timestamp_now.max(block_inputs.prev_block_header().timestamp() + 1);

        Self::new_at(block_inputs, batches, timestamp)
    }

    // ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns an iterator over all transactions in the block, in the order of the batches that
    /// contain them.
    pub fn transactions(&self) -> impl Iterator<Item = &TransactionHeader> {
        self.batches
            .as_slice()
            .iter()
            .flat_map(|batch| batch.transactions().as_slice().iter())
    }

    /// Returns the block number of this proposed block.
    pub fn block_num(&self) -> BlockNumber {
        // The chain length is the length at the state of the previous block header, so we have to
        // add one.
        self.partial_blockchain().chain_length() + 1
    }

    /// Returns a reference to the slice of batches in this block.
    pub fn batches(&self) -> &OrderedBatches {
        &self.batches
    }

    /// Returns the map of nullifiers to their proofs from the proposed block.
    pub fn created_nullifiers(&self) -> &BTreeMap<Nullifier, NullifierWitness> {
        &self.created_nullifiers
    }

    /// Returns a reference to the previous block header that this block builds on top of.
    pub fn prev_block_header(&self) -> &BlockHeader {
        &self.prev_block_header
    }

    /// Returns the [`PartialBlockchain`] that this block contains.
    pub fn partial_blockchain(&self) -> &PartialBlockchain {
        &self.partial_blockchain
    }

    /// Returns a reference to the slice of accounts updated in this block.
    pub fn updated_accounts(&self) -> &[(AccountId, AccountUpdateWitness)] {
        &self.account_updated_witnesses
    }

    /// Returns the timestamp of this block.
    pub fn timestamp(&self) -> u32 {
        self.timestamp
    }

    /// Returns a slice of the [`OutputNoteBatch`] of each batch in this block.
    pub fn output_note_batches(&self) -> &[OutputNoteBatch] {
        &self.output_note_batches
    }

    // STATE MUTATORS
    // --------------------------------------------------------------------------------------------

    /// Consumes self and returns the non-[`Copy`] parts of the block:
    /// batches, account update witnesses, output note batches, created nullifiers, partial
    /// blockchain and the previous block header, in that order.
    #[allow(clippy::type_complexity)]
    pub fn into_parts(
        self,
    ) -> (
        OrderedBatches,
        Vec<(AccountId, AccountUpdateWitness)>,
        Vec<OutputNoteBatch>,
        BTreeMap<Nullifier, NullifierWitness>,
        PartialBlockchain,
        BlockHeader,
    ) {
        (
            self.batches,
            self.account_updated_witnesses,
            self.output_note_batches,
            self.created_nullifiers,
            self.partial_blockchain,
            self.prev_block_header,
        )
    }
}
345
346// SERIALIZATION
347// ================================================================================================
348
impl Serializable for ProposedBlock {
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        // NOTE: the field order here is the wire format and must be kept in sync with
        // `<ProposedBlock as Deserializable>::read_from`.
        self.batches.write_into(target);
        self.timestamp.write_into(target);
        self.account_updated_witnesses.write_into(target);
        self.output_note_batches.write_into(target);
        self.created_nullifiers.write_into(target);
        self.partial_blockchain.write_into(target);
        self.prev_block_header.write_into(target);
    }
}
360
361impl Deserializable for ProposedBlock {
362    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
363        let block = Self {
364            batches: OrderedBatches::read_from(source)?,
365            timestamp: u32::read_from(source)?,
366            account_updated_witnesses: <Vec<(AccountId, AccountUpdateWitness)>>::read_from(source)?,
367            output_note_batches: <Vec<OutputNoteBatch>>::read_from(source)?,
368            created_nullifiers: <BTreeMap<Nullifier, NullifierWitness>>::read_from(source)?,
369            partial_blockchain: PartialBlockchain::read_from(source)?,
370            prev_block_header: BlockHeader::read_from(source)?,
371        };
372
373        Ok(block)
374    }
375}
376// HELPER FUNCTIONS
377// ================================================================================================
378
379fn check_duplicate_batches(batches: &[ProvenBatch]) -> Result<(), ProposedBlockError> {
380    let mut input_note_set = BTreeSet::new();
381
382    for batch in batches {
383        if !input_note_set.insert(batch.id()) {
384            return Err(ProposedBlockError::DuplicateBatch { batch_id: batch.id() });
385        }
386    }
387
388    Ok(())
389}
390
391fn check_timestamp_increases_monotonically(
392    provided_timestamp: u32,
393    prev_block_header: &BlockHeader,
394) -> Result<(), ProposedBlockError> {
395    if provided_timestamp <= prev_block_header.timestamp() {
396        Err(ProposedBlockError::TimestampDoesNotIncreaseMonotonically {
397            provided_timestamp,
398            previous_timestamp: prev_block_header.timestamp(),
399        })
400    } else {
401        Ok(())
402    }
403}
404
405/// Checks whether any of the batches is expired and can no longer be included in this block.
406///
407/// To illustrate, a batch which expired at block 4 cannot be included in block 5, but if it
408/// expires at block 5 then it can still be included in block 5.
409fn check_batch_expiration(
410    batches: &[ProvenBatch],
411    prev_block_header: &BlockHeader,
412) -> Result<(), ProposedBlockError> {
413    let current_block_num = prev_block_header.block_num() + 1;
414
415    for batch in batches {
416        if batch.batch_expiration_block_num() < current_block_num {
417            return Err(ProposedBlockError::ExpiredBatch {
418                batch_id: batch.id(),
419                batch_expiration_block_num: batch.batch_expiration_block_num(),
420                current_block_num,
421            });
422        }
423    }
424
425    Ok(())
426}
427
428/// Check that each nullifier in the block has a proof provided and that the nullifier is
429/// unspent. The proofs are required to update the nullifier tree.
430fn check_nullifiers(
431    nullifier_witnesses: &BTreeMap<Nullifier, NullifierWitness>,
432    block_input_notes: impl Iterator<Item = Nullifier>,
433) -> Result<(), ProposedBlockError> {
434    for block_input_note in block_input_notes {
435        match nullifier_witnesses
436            .get(&block_input_note)
437            .and_then(|x| x.proof().get(&block_input_note.as_word()))
438        {
439            Some(nullifier_value) => {
440                if nullifier_value != EMPTY_WORD {
441                    return Err(ProposedBlockError::NullifierSpent(block_input_note));
442                }
443            },
444            // If the nullifier witnesses did not contain a proof for this nullifier or the provided
445            // proof was not for this nullifier, then it's an error.
446            None => return Err(ProposedBlockError::NullifierProofMissing(block_input_note)),
447        }
448    }
449
450    Ok(())
451}
452
453/// Removes the nullifiers from the nullifier witnesses that were erased (i.e. created and consumed
454/// within the block).
455fn remove_erased_nullifiers(
456    nullifier_witnesses: &mut BTreeMap<Nullifier, NullifierWitness>,
457    block_erased_notes: impl Iterator<Item = Nullifier>,
458) {
459    for erased_note in block_erased_notes {
460        // We do not check that the nullifier was actually present to allow the block inputs to
461        // not include a nullifier that is known to belong to an erased note.
462        let _ = nullifier_witnesses.remove(&erased_note);
463    }
464}
465
466/// Checks consistency between the previous block header and the provided partial blockchain.
467///
468/// This checks that:
469/// - the chain length of the partial blockchain is equal to the block number of the previous block
470///   header, i.e. the partial blockchain's latest block is the previous' blocks reference block.
471///   The previous block header will be added to the partial blockchain as part of constructing the
472///   current block.
473/// - the root of the partial blockchain is equivalent to the chain commitment of the previous block
474///   header.
475fn check_reference_block_partial_blockchain_consistency(
476    partial_blockchain: &PartialBlockchain,
477    prev_block_header: &BlockHeader,
478) -> Result<(), ProposedBlockError> {
479    // Make sure that the current partial blockchain has blocks up to prev_block_header - 1, i.e.
480    // its chain length is equal to the block number of the previous block header.
481    if partial_blockchain.chain_length() != prev_block_header.block_num() {
482        return Err(ProposedBlockError::ChainLengthNotEqualToPreviousBlockNumber {
483            chain_length: partial_blockchain.chain_length(),
484            prev_block_num: prev_block_header.block_num(),
485        });
486    }
487
488    let chain_commitment = partial_blockchain.peaks().hash_peaks();
489    if chain_commitment != prev_block_header.chain_commitment() {
490        return Err(ProposedBlockError::ChainRootNotEqualToPreviousBlockChainCommitment {
491            chain_commitment,
492            prev_block_chain_commitment: prev_block_header.chain_commitment(),
493            prev_block_num: prev_block_header.block_num(),
494        });
495    }
496
497    Ok(())
498}
499
500/// Check that each block referenced by a batch in the block has an entry in the partial blockchain,
501/// except if the referenced block is the same as the previous block, referenced by the block.
502fn check_batch_reference_blocks(
503    partial_blockchain: &PartialBlockchain,
504    prev_block_header: &BlockHeader,
505    batches: &[ProvenBatch],
506) -> Result<(), ProposedBlockError> {
507    for batch in batches {
508        let batch_reference_block_num = batch.reference_block_num();
509        if batch_reference_block_num != prev_block_header.block_num()
510            && !partial_blockchain.contains_block(batch.reference_block_num())
511        {
512            return Err(ProposedBlockError::BatchReferenceBlockMissingFromChain {
513                reference_block_num: batch.reference_block_num(),
514                batch_id: batch.id(),
515            });
516        }
517    }
518
519    Ok(())
520}
521
522/// Computes the block's output notes from the batches of notes of each batch in the block.
523///
524/// We pass in `block_output_notes` which is the full set of output notes of the block, with output
525/// notes erased that are consumed by some batch in the block.
526///
527/// The batch output notes of each proven batch however contain all the notes that it creates,
528/// including ones that were potentially erased in `block_output_notes`. This means we have to
529/// make the batch output notes consistent with `block_output_notes` by removing the erased notes.
530/// Then it accurately represents what output notes the batch actually creates as part of the block.
531///
532/// Returns the set of [`OutputNoteBatch`]es that each batch creates.
533fn compute_block_output_notes(
534    batches: &[ProvenBatch],
535    mut block_output_notes: BTreeMap<NoteId, (BatchId, OutputNote)>,
536) -> Vec<OutputNoteBatch> {
537    let mut block_output_note_batches = Vec::with_capacity(batches.len());
538
539    for batch in batches.iter() {
540        let batch_output_notes = compute_batch_output_notes(batch, &mut block_output_notes);
541        block_output_note_batches.push(batch_output_notes);
542    }
543
544    block_output_note_batches
545}
546
547/// Computes the output note of the given batch. This is essentially the batch's output notes minus
548/// all erased notes.
549///
550/// If a note in the batch's output notes is not present in the block output notes map it means it
551/// was erased and should therefore not be added to the batch's output notes. If it is present, it
552/// is added to the set of output notes of this batch.
553///
554/// The output note set is returned.
555fn compute_batch_output_notes(
556    batch: &ProvenBatch,
557    block_output_notes: &mut BTreeMap<NoteId, (BatchId, OutputNote)>,
558) -> OutputNoteBatch {
559    // The len of the batch output notes is an upper bound of how many notes the batch could've
560    // produced so we reserve that much space to avoid reallocation.
561    let mut batch_output_notes = Vec::with_capacity(batch.output_notes().len());
562
563    for (note_idx, original_output_note) in batch.output_notes().iter().enumerate() {
564        // If block_output_notes no longer contains a note it means it was erased and we do not
565        // include it in the output notes of the current batch. We include the original index of the
566        // note in the batch so we can later correctly construct the block note tree. This index is
567        // needed because we want to be able to construct the block note tree in two ways: 1) By
568        // inserting the individual batch note trees (with erased notes removed) as subtrees into an
569        // empty block note tree or 2) by iterating the set `OutputNoteBatch`es. If we did not store
570        // the index, then the second method would assume a contiguous layout of output notes and
571        // result in a different tree than the first method.
572        //
573        // Note that because we disallow duplicate output notes, if this map contains the
574        // original note id, then we can be certain it was created by this batch and should stay
575        // in the tree. In other words, there is no ambiguity where a note originated from.
576        if let Some((_batch_id, output_note)) =
577            block_output_notes.remove(&original_output_note.id())
578        {
579            debug_assert_eq!(
580                _batch_id,
581                batch.id(),
582                "batch that contained the note originally is no longer the batch that contains it according to the provided map"
583            );
584            batch_output_notes.push((note_idx, output_note));
585        }
586    }
587
588    batch_output_notes
589}
590
591// ACCOUNT UPDATE AGGREGATOR
592// ================================================================================================
593
/// Aggregates the account updates of all batches in a block into a single update per account.
struct AccountUpdateAggregator {
    /// The map from each account to the map of each of its updates, where the digest is the state
    /// commitment from which the contained update starts.
    /// An invariant of this field is that if the outer map has an entry for some account, the
    /// inner update map is guaranteed to not be empty as well. This is upheld by `insert_update`,
    /// which only creates an outer entry when it inserts an inner update.
    updates: BTreeMap<AccountId, BTreeMap<Word, (BatchAccountUpdate, BatchId)>>,
}
601
impl AccountUpdateAggregator {
    /// Creates an aggregator with no updates.
    fn new() -> Self {
        Self { updates: BTreeMap::new() }
    }

    /// Aggregates all updates for the same account and stores each update indexed by its initial
    /// state commitment so we can easily retrieve them in the next step. This lets us
    /// chronologically order the updates per account across batches.
    fn from_batches(batches: &[ProvenBatch]) -> Result<Self, ProposedBlockError> {
        let mut update_aggregator = AccountUpdateAggregator::new();

        for batch in batches {
            for (account_id, update) in batch.account_updates() {
                update_aggregator.insert_update(*account_id, batch.id(), update.clone())?;
            }
        }

        Ok(update_aggregator)
    }

    /// Inserts the update from one batch for a specific account into the map of updates.
    ///
    /// Returns [`ProposedBlockError::ConflictingBatchesUpdateSameAccount`] if another batch
    /// already registered an update starting from the same state commitment.
    fn insert_update(
        &mut self,
        account_id: AccountId,
        batch_id: BatchId,
        update: BatchAccountUpdate,
    ) -> Result<(), ProposedBlockError> {
        // As a special case, a NOOP transaction (i.e. one where the initial and final state
        // commitment is the same) can just be ignored without changing the outcome.
        // Without this early return, such a transaction would conflict with other state-updating
        // transactions, because there would be two transactions that update the account from
        // the same initial state commitment.
        if update.initial_state_commitment() == update.final_state_commitment() {
            return Ok(());
        };

        // `BTreeMap::insert` returns the previous value if the key was occupied, which here
        // means two batches tried to update the account from the same initial state: a fork.
        if let Some((conflicting_update, conflicting_batch_id)) = self
            .updates
            .entry(account_id)
            .or_default()
            .insert(update.initial_state_commitment(), (update, batch_id))
        {
            return Err(ProposedBlockError::ConflictingBatchesUpdateSameAccount {
                account_id,
                initial_state_commitment: conflicting_update.initial_state_commitment(),
                first_batch_id: conflicting_batch_id,
                second_batch_id: batch_id,
            });
        }

        Ok(())
    }

    /// Consumes self and aggregates the account updates from all contained accounts.
    /// For each updated account an entry in `account_witnesses` must be present, otherwise
    /// [`ProposedBlockError::MissingAccountWitness`] is returned.
    fn into_update_witnesses(
        self,
        mut account_witnesses: BTreeMap<AccountId, AccountWitness>,
    ) -> Result<Vec<(AccountId, AccountUpdateWitness)>, ProposedBlockError> {
        let mut account_update_witnesses = Vec::with_capacity(self.updates.len());

        for (account_id, updates_map) in self.updates {
            let witness = account_witnesses
                .remove(&account_id)
                .ok_or(ProposedBlockError::MissingAccountWitness(account_id))?;

            let account_update_witness = Self::aggregate_account(account_id, witness, updates_map)?;

            account_update_witnesses.push((account_id, account_update_witness));
        }

        Ok(account_update_witnesses)
    }

    /// Build the update for a single account from the provided map of updates, where each entry is
    /// the state from which the update starts. This chains updates for this account together in a
    /// chronological order using the state commitments to link them.
    fn aggregate_account(
        account_id: AccountId,
        initial_state_proof: AccountWitness,
        mut updates: BTreeMap<Word, (BatchAccountUpdate, BatchId)>,
    ) -> Result<AccountUpdateWitness, ProposedBlockError> {
        // The account witness could prove inclusion of a different ID in which case the initial
        // state commitment of the current ID is the empty word.
        let initial_state_commitment = if account_id == initial_state_proof.id() {
            initial_state_proof.state_commitment()
        } else {
            Word::empty()
        };

        let mut details: Option<AccountUpdateDetails> = None;

        // Walk the chain of updates: each iteration removes the update that starts at the
        // current commitment and advances the commitment to that update's final state. If no
        // update starts at the current commitment while updates remain, the chain has a gap.
        let mut current_commitment = initial_state_commitment;
        while !updates.is_empty() {
            let (update, _) = updates.remove(&current_commitment).ok_or_else(|| {
                ProposedBlockError::InconsistentAccountStateTransition {
                    account_id,
                    state_commitment: current_commitment,
                    remaining_state_commitments: updates.keys().copied().collect(),
                }
            })?;

            current_commitment = update.final_state_commitment();
            let update_details = update.into_update();

            // Fold this update into the aggregate; the first update seeds the aggregate.
            details = Some(match details {
                None => update_details,
                Some(details) => details.merge(update_details).map_err(|source| {
                    ProposedBlockError::AccountUpdateError { account_id, source: Box::new(source) }
                })?,
            });
        }

        Ok(AccountUpdateWitness::new(
            initial_state_commitment,
            current_commitment,
            initial_state_proof,
            details.expect("details should be Some as updates is guaranteed to not be empty"),
        ))
    }
}