miden_processor/fast/trace_state.rs

use alloc::{collections::VecDeque, sync::Arc};

use miden_air::{RowIndex, trace::chiplets::hasher::HasherState};
use miden_core::{
    Felt, ONE, Word, ZERO,
    crypto::merkle::MerklePath,
    mast::{MastForest, MastNodeId},
    stack::MIN_STACK_DEPTH,
};

use crate::{
    AdviceError, ContextId, ErrorContext, ExecutionError,
    continuation_stack::ContinuationStack,
    fast::FastProcessor,
    processor::{AdviceProviderInterface, HasherInterface, MemoryInterface},
};

// TRACE FRAGMENT CONTEXT
// ================================================================================================

/// Information required to build a trace fragment of length [super::NUM_ROWS_PER_CORE_FRAGMENT].
///
/// This struct is meant to be built by the processor, and consumed mutably by a trace fragment
/// builder. That is, as trace generation progresses, this struct can be mutated to represent the
/// generation context at any clock cycle within the fragment.
///
/// This struct is conceptually divided into 4 main components:
/// 1. core trace state: the state of the processor at any clock cycle in the fragment, initialized
///    to the state at the first clock cycle in the fragment,
/// 2. execution replay: information needed to replay the execution of the processor for the
///    remainder of the fragment,
/// 3. continuation: a stack of continuations for the processor representing the nodes in the MAST
///    forest to execute when the current node is done executing,
/// 4. initial state: some information about the state of the execution at the start of the
///    fragment. This includes the [MastForest] that is being executed at the start of the fragment
///    (which can change when encountering an [miden_core::mast::ExternalNode]), and the current
///    node's execution state, which pinpoints exactly where in the node's processing this fragment
///    begins.
#[derive(Debug)]
pub struct TraceFragmentContext {
    pub state: CoreTraceState,
    pub replay: ExecutionReplay,
    pub continuation: ContinuationStack,
    pub initial_mast_forest: Arc<MastForest>,
    pub initial_execution_state: NodeExecutionState,
}

// CORE TRACE STATE
// ================================================================================================

/// Subset of the processor state used to build the core trace (system, decoder and stack sets of
/// columns).
#[derive(Debug)]
pub struct CoreTraceState {
    pub system: SystemState,
    pub decoder: DecoderState,
    pub stack: StackState,
}

// SYSTEM STATE
// ================================================================================================

/// The `SystemState` represents all the information needed to build one row of the System trace.
///
/// This struct captures the complete state of the system at a specific clock cycle, allowing for
/// reconstruction of the system trace during concurrent execution.
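///
/// # Example
///
/// An illustrative sketch (not compiled) of the state at the start of execution, with field
/// values taken from the field docs below; `ContextId::root()` and `RowIndex::from` are assumed
/// constructors:
///
/// ```ignore
/// let state = SystemState {
///     clk: RowIndex::from(0u32),
///     ctx: ContextId::root(),          // root context
///     fmp: Felt::new(2u64.pow(30)),    // initial free-memory pointer
///     in_syscall: false,
///     fn_hash: Word::from([ZERO; 4]),  // root context has a zero fn hash
/// };
/// ```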
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SystemState {
    /// Current clock cycle (row index in the trace).
    pub clk: RowIndex,

    /// Execution context ID - starts at 0 (root context), and changes on CALL/SYSCALL operations.
    pub ctx: ContextId,

    /// Free memory pointer - initially set to 2^30, used for local memory offsets.
    pub fmp: Felt,

    /// Flag indicating whether we are currently executing within a SYSCALL block.
    pub in_syscall: bool,

    /// Hash of the function that initiated the current execution context.
    /// - For the root context: [ZERO; 4]
    /// - For CALL/DYNCALL contexts: hash of the called function
    /// - For SYSCALL contexts: the hash remains that of the calling function
    pub fn_hash: Word,
}

impl SystemState {
    /// Convenience constructor that creates a new `SystemState` from a `FastProcessor`.
    pub fn from_processor(processor: &FastProcessor) -> Self {
        Self {
            clk: processor.clk,
            ctx: processor.ctx,
            fmp: processor.fmp,
            in_syscall: processor.in_syscall,
            fn_hash: processor.caller_hash,
        }
    }
}

// DECODER STATE
// ================================================================================================

/// The subset of the decoder state required to build the trace.
#[derive(Debug)]
pub struct DecoderState {
    /// The value of the [miden_air::trace::decoder::ADDR_COL_IDX] column.
    pub current_addr: Felt,
    /// The address of the current MAST node's parent.
    pub parent_addr: Felt,
}

// STACK STATE
// ================================================================================================

/// This struct captures the state of the top 16 elements of the stack at a specific clock cycle;
/// that is, those elements that are written directly into the trace.
///
/// The stack trace consists of 19 columns total: 16 stack columns + 3 helper columns. The helper
/// columns (stack_depth, overflow_addr, and overflow_helper) are computed from the `stack_depth`
/// and `last_overflow_addr` fields.
#[derive(Debug)]
pub struct StackState {
    /// Top 16 stack slots (s0 to s15). These represent the top elements of the stack that are
    /// directly accessible.
    pub stack_top: [Felt; MIN_STACK_DEPTH], // 16 columns

    /// Current number of elements on the stack. It is guaranteed to be >= 16.
    stack_depth: usize,

    /// The last recorded overflow address for the stack, i.e., the clock cycle at which the last
    /// item was pushed to the overflow table.
    last_overflow_addr: Felt,
}

impl StackState {
    /// Creates a new StackState with the provided parameters.
    ///
    /// `stack_top` should be the top 16 elements of the stack stored in reverse order, i.e.,
    /// `stack_top[15]` is the topmost element (s0), and `stack_top[0]` is the bottommost element
    /// (s15).
    pub fn new(
        stack_top: [Felt; MIN_STACK_DEPTH],
        stack_depth: usize,
        last_overflow_addr: Felt,
    ) -> Self {
        Self {
            stack_top,
            stack_depth,
            last_overflow_addr,
        }
    }

    /// Returns the value at the specified index in the stack top.
    ///
    /// # Panics
    /// - if the index is greater than or equal to [MIN_STACK_DEPTH].
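    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled): since the top 16 elements are stored in reverse
    /// order, `get(0)` returns `stack_top[15]`, i.e. s0.
    ///
    /// ```ignore
    /// let mut top = [ZERO; MIN_STACK_DEPTH];
    /// top[15] = ONE; // s0, the topmost element
    /// let state = StackState::new(top, MIN_STACK_DEPTH, ZERO);
    /// assert_eq!(state.get(0), ONE);
    /// ```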
    pub fn get(&self, index: usize) -> Felt {
        self.stack_top[MIN_STACK_DEPTH - index - 1]
    }

    /// Returns the stack depth (b0 helper column).
    pub fn stack_depth(&self) -> usize {
        self.stack_depth
    }

    /// Returns the overflow address (b1 helper column).
    pub fn overflow_addr(&self) -> Felt {
        self.last_overflow_addr
    }

    /// Returns the number of elements in the current context's overflow stack.
    pub fn num_overflow_elements_in_current_ctx(&self) -> usize {
        debug_assert!(self.stack_depth >= MIN_STACK_DEPTH);
        self.stack_depth - MIN_STACK_DEPTH
    }

    /// Pushes the given element onto the overflow stack at the provided clock cycle.
    ///
    /// Note: the element itself is not stored; only the stack depth and overflow address are
    /// updated, since overflow values are recovered via [StackOverflowReplay].
    pub fn push_overflow(&mut self, _element: Felt, clk: RowIndex) {
        self.stack_depth += 1;
        self.last_overflow_addr = clk.into();
    }

    /// Pops the top element from the overflow stack, if any.
    ///
    /// If the overflow table is empty (i.e. the stack depth is 16), the stack depth is unchanged,
    /// and `None` is returned.
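    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled): a pop consumes the value and new overflow address
    /// previously recorded by the fast processor (values match the table in the
    /// [StackOverflowReplay] docs).
    ///
    /// ```ignore
    /// let mut replay = StackOverflowReplay::new();
    /// replay.record_pop_overflow(Felt::new(7), Felt::new(16));
    ///
    /// // depth 17: one element in the overflow table, last pushed at clock cycle 18
    /// let mut state = StackState::new([ZERO; MIN_STACK_DEPTH], 17, Felt::new(18));
    /// assert_eq!(state.pop_overflow(&mut replay), Some(Felt::new(7)));
    /// assert_eq!(state.stack_depth(), MIN_STACK_DEPTH);
    /// ```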
    pub fn pop_overflow(
        &mut self,
        stack_overflow_replay: &mut StackOverflowReplay,
    ) -> Option<Felt> {
        debug_assert!(self.stack_depth >= MIN_STACK_DEPTH);

        if self.stack_depth > MIN_STACK_DEPTH {
            let (stack_value, new_overflow_addr) = stack_overflow_replay.replay_pop_overflow();
            self.stack_depth -= 1;
            self.last_overflow_addr = new_overflow_addr;
            Some(stack_value)
        } else {
            self.last_overflow_addr = ZERO;
            None
        }
    }

    /// Derives the denominator of the overflow helper (h0 helper column) from the current stack
    /// depth.
    ///
    /// It is expected that this value is later inverted via batch inversion.
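    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled): with two elements in the overflow table (depth 18),
    /// the denominator is 2.
    ///
    /// ```ignore
    /// let state = StackState::new([ZERO; MIN_STACK_DEPTH], 18, Felt::new(5));
    /// assert_eq!(state.overflow_helper(), Felt::new(2));
    /// ```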
    pub fn overflow_helper(&self) -> Felt {
        let denominator = self.stack_depth() - MIN_STACK_DEPTH;
        Felt::new(denominator as u64)
    }

    /// Starts a new execution context for this stack and returns a tuple consisting of the current
    /// stack depth and the address of the overflow table row prior to starting the new context.
    ///
    /// This has the effect of hiding the contents of the overflow table such that it appears as
    /// if the overflow table in the new context is empty.
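    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled): starting a context resets the visible depth to 16
    /// and returns the saved depth and overflow address.
    ///
    /// ```ignore
    /// let mut state = StackState::new([ZERO; MIN_STACK_DEPTH], 18, Felt::new(5));
    /// let (saved_depth, saved_addr) = state.start_context();
    /// assert_eq!((saved_depth, saved_addr), (18, Felt::new(5)));
    /// assert_eq!(state.stack_depth(), MIN_STACK_DEPTH);
    /// ```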
    pub fn start_context(&mut self) -> (usize, Felt) {
        // Return the current stack depth and overflow address at the start of a new context
        let current_depth = self.stack_depth;
        let current_overflow_addr = self.last_overflow_addr;

        // Reset stack depth to minimum (parallel to Process Stack behavior)
        self.stack_depth = MIN_STACK_DEPTH;
        self.last_overflow_addr = ZERO;

        (current_depth, current_overflow_addr)
    }

    /// Restores the prior context for this stack.
    ///
    /// This has the effect of bringing back items previously hidden from the overflow table.
    pub fn restore_context(&mut self, stack_overflow_replay: &mut StackOverflowReplay) {
        let (stack_depth, last_overflow_addr) =
            stack_overflow_replay.replay_restore_context_overflow_addr();
        // Restore stack depth to the value from before the context switch (parallel to Process
        // Stack behavior)
        self.stack_depth = stack_depth;
        self.last_overflow_addr = last_overflow_addr;
    }
}

// EXECUTION REPLAY
// ================================================================================================

/// Replay data necessary to build a trace fragment.
///
/// During execution, the processor records information to be replayed by the corresponding trace
/// generator. This is necessary because the trace generators don't have access to some of the
/// components needed to produce those values, such as the memory chiplet, the advice provider,
/// etc. It also packages up all the data that trace generators need to generate trace fragments,
/// which allows fragments to be generated in parallel, for example on separate machines.
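///
/// # Example
///
/// An illustrative record/replay round trip (not compiled): the processor records values in
/// execution order, and a trace generator later consumes them in the same order.
///
/// ```ignore
/// let mut replay = ExecutionReplay::default();
/// let addr = Felt::new(100);
///
/// // during fast execution:
/// replay.memory.record_read_element(Felt::new(42), addr);
///
/// // later, possibly on another machine:
/// assert_eq!(replay.memory.replay_read_element(addr), Felt::new(42));
/// ```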
#[derive(Debug, Default)]
pub struct ExecutionReplay {
    pub block_stack: BlockStackReplay,
    pub stack_overflow: StackOverflowReplay,
    pub memory: MemoryReplay,
    pub advice: AdviceReplay,
    pub hasher: HasherReplay,
    pub external_node: ExternalNodeReplay,
}

// BLOCK STACK REPLAY
// ================================================================================================

/// Replay data for the block stack.
#[derive(Debug, Default)]
pub struct BlockStackReplay {
    /// The parent address - needed for each node start operation (JOIN, SPLIT, etc.).
    node_start: VecDeque<Felt>,
    /// The data needed to recover the state on an END operation.
    node_end: VecDeque<NodeEndData>,
    /// Extra data needed to recover the state on an END operation specifically for
    /// CALL/SYSCALL/DYNCALL nodes (which start/end a new execution context).
    execution_contexts: VecDeque<ExecutionContextSystemInfo>,
}

impl BlockStackReplay {
    /// Creates a new instance of `BlockStackReplay`.
    pub fn new() -> Self {
        Self {
            node_start: VecDeque::new(),
            node_end: VecDeque::new(),
            execution_contexts: VecDeque::new(),
        }
    }

    /// Records the node's parent address.
    pub fn record_node_start(&mut self, parent_addr: Felt) {
        self.node_start.push_back(parent_addr);
    }

    /// Records the data needed to properly recover the state on an END operation.
    ///
    /// See [NodeEndData] for more details.
    pub fn record_node_end(
        &mut self,
        ended_node_addr: Felt,
        flags: NodeFlags,
        prev_addr: Felt,
        prev_parent_addr: Felt,
    ) {
        self.node_end.push_back(NodeEndData {
            ended_node_addr,
            flags,
            prev_addr,
            prev_parent_addr,
        });
    }

    /// Records the execution context system info for a CALL/SYSCALL/DYNCALL operation.
    pub fn record_execution_context(&mut self, ctx_info: ExecutionContextSystemInfo) {
        self.execution_contexts.push_back(ctx_info);
    }

    /// Replays the node's parent address.
    pub fn replay_node_start(&mut self) -> Felt {
        self.node_start.pop_front().expect("No node start address recorded")
    }

    /// Replays the data needed to recover the state on an END operation.
    pub fn replay_node_end(&mut self) -> NodeEndData {
        self.node_end.pop_front().expect("No node address and flags recorded")
    }

    /// Replays the next recorded execution context system info.
    pub fn replay_execution_context(&mut self) -> ExecutionContextSystemInfo {
        self.execution_contexts.pop_front().expect("No execution context recorded")
    }
}

/// The flags written in the second word of the hasher state for END operations.
#[derive(Debug)]
pub struct NodeFlags {
    is_loop_body: bool,
    loop_entered: bool,
    is_call: bool,
    is_syscall: bool,
}

impl NodeFlags {
    /// Creates a new instance of `NodeFlags`.
    pub fn new(is_loop_body: bool, loop_entered: bool, is_call: bool, is_syscall: bool) -> Self {
        Self {
            is_loop_body,
            loop_entered,
            is_call,
            is_syscall,
        }
    }

    /// Returns ONE if this node is the body of a LOOP node; otherwise returns ZERO.
    pub fn is_loop_body(&self) -> Felt {
        if self.is_loop_body { ONE } else { ZERO }
    }

    /// Returns ONE if this is a LOOP node and the body of the loop was executed at
    /// least once; otherwise, returns ZERO.
    pub fn loop_entered(&self) -> Felt {
        if self.loop_entered { ONE } else { ZERO }
    }

    /// Returns ONE if this node is a CALL or DYNCALL; otherwise returns ZERO.
    pub fn is_call(&self) -> Felt {
        if self.is_call { ONE } else { ZERO }
    }

    /// Returns ONE if this node is a SYSCALL; otherwise returns ZERO.
    pub fn is_syscall(&self) -> Felt {
        if self.is_syscall { ONE } else { ZERO }
    }

    /// Convenience method that returns the flags in the order in which they are written to the
    /// second word of the hasher state for the trace row of an END operation.
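    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled): a loop body that is neither a CALL nor a SYSCALL.
    ///
    /// ```ignore
    /// let flags = NodeFlags::new(true, false, false, false);
    /// assert_eq!(
    ///     flags.to_hasher_state_second_word(),
    ///     Word::from([ONE, ZERO, ZERO, ZERO]),
    /// );
    /// ```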
    pub fn to_hasher_state_second_word(&self) -> Word {
        [self.is_loop_body(), self.loop_entered(), self.is_call(), self.is_syscall()].into()
    }
}

/// The data needed to fully recover the state on an END operation.
///
/// We record `ended_node_addr` and `flags` in order to be able to properly populate the trace
/// row for the END operation. Additionally, we record `prev_addr` and `prev_parent_addr` to
/// allow emulating peeking into the block stack, which is needed when processing REPEAT or RESPAN
/// nodes.
#[derive(Debug)]
pub struct NodeEndData {
    /// The address of the node that is ending.
    pub ended_node_addr: Felt,
    /// The flags associated with the node that is ending.
    pub flags: NodeFlags,
    /// The address of the node sitting on top of the block stack after the END operation (or 0 if
    /// the block stack is empty).
    pub prev_addr: Felt,
    /// The parent address of the node sitting on top of the block stack after the END operation
    /// (or 0 if the block stack is empty).
    pub prev_parent_addr: Felt,
}

/// Data required to recover the state of an execution context when restoring it during an END
/// operation.
#[derive(Debug)]
pub struct ExecutionContextSystemInfo {
    pub parent_ctx: ContextId,
    pub parent_fn_hash: Word,
    pub parent_fmp: Felt,
}

// EXTERNAL NODE REPLAY
// ================================================================================================

/// Replay data for external node resolutions.
///
/// Records the (MastNodeId, MastForest) pair to which each [miden_core::mast::ExternalNode]
/// encountered during fast execution resolved, so that trace generators can replay the
/// resolution without re-resolving the node.
#[derive(Debug)]
pub struct ExternalNodeReplay {
    external_node_resolutions: VecDeque<(MastNodeId, Arc<MastForest>)>,
}

impl Default for ExternalNodeReplay {
    fn default() -> Self {
        Self::new()
    }
}

impl ExternalNodeReplay {
    /// Creates a new ExternalNodeReplay with an empty resolution queue.
    pub fn new() -> Self {
        Self {
            external_node_resolutions: VecDeque::new(),
        }
    }

    /// Records a resolution of an external node to a MastNodeId with its associated MastForest.
    pub fn record_resolution(&mut self, node_id: MastNodeId, forest: Arc<MastForest>) {
        self.external_node_resolutions.push_back((node_id, forest));
    }

    /// Replays the next recorded external node resolution, returning both the node ID and forest.
    pub fn replay_resolution(&mut self) -> (MastNodeId, Arc<MastForest>) {
        self.external_node_resolutions
            .pop_front()
            .expect("No external node resolutions recorded")
    }
}

// MEMORY REPLAY
// ================================================================================================

/// Implements a shim for the memory chiplet, in which all elements read from memory during a given
/// fragment are recorded by the fast processor, and replayed by the main trace fragment generators.
///
/// This is used to simulate memory reads in parallel trace generation without needing to actually
/// access the memory chiplet. Writes are not recorded here, as they are not needed for the trace
/// generation process.
///
/// Elements/words read are stored with their addresses and are assumed to be read from the same
/// addresses that they were recorded at. This works naturally since the fast processor has exactly
/// the same access patterns as the main trace generators (which re-execute part of the program).
/// The read methods include debug assertions to verify address consistency.
#[derive(Debug, Default)]
pub struct MemoryReplay {
    /// (addr, element) pairs recorded in execution order.
    elements_read: VecDeque<(Felt, Felt)>,
    /// (addr, word) pairs recorded in execution order.
    words_read: VecDeque<(Felt, Word)>,
}

impl MemoryReplay {
    // MUTATIONS (populated by the fast processor)
    // --------------------------------------------------------------------------------

    /// Records an element read from memory.
    pub fn record_read_element(&mut self, element: Felt, addr: Felt) {
        self.elements_read.push_back((addr, element));
    }

    /// Records a word read from memory.
    pub fn record_read_word(&mut self, word: Word, addr: Felt) {
        self.words_read.push_back((addr, word));
    }

    // ACCESSORS
    // --------------------------------------------------------------------------------

    /// Replays an element read from memory, returning the previously recorded value.
    pub fn replay_read_element(&mut self, addr: Felt) -> Felt {
        let (stored_addr, element) =
            self.elements_read.pop_front().expect("No elements read from memory");
        debug_assert_eq!(stored_addr, addr, "Address mismatch: expected {addr}, got {stored_addr}");
        element
    }

    /// Replays a word read from memory, returning the previously recorded value.
    pub fn replay_read_word(&mut self, addr: Felt) -> Word {
        let (stored_addr, word) = self.words_read.pop_front().expect("No words read from memory");
        debug_assert_eq!(stored_addr, addr, "Address mismatch: expected {addr}, got {stored_addr}");
        word
    }
}

impl MemoryInterface for MemoryReplay {
    fn read_element(
        &mut self,
        _ctx: ContextId,
        addr: Felt,
        _err_ctx: &impl ErrorContext,
    ) -> Result<Felt, crate::MemoryError> {
        Ok(self.replay_read_element(addr))
    }

    fn read_word(
        &mut self,
        _ctx: ContextId,
        addr: Felt,
        _clk: RowIndex,
        _err_ctx: &impl ErrorContext,
    ) -> Result<Word, crate::MemoryError> {
        Ok(self.replay_read_word(addr))
    }

    /// Writes are not recorded for replay, so this is a no-op.
    fn write_element(
        &mut self,
        _ctx: ContextId,
        _addr: Felt,
        _element: Felt,
        _err_ctx: &impl ErrorContext,
    ) -> Result<(), crate::MemoryError> {
        Ok(())
    }

    /// Writes are not recorded for replay, so this is a no-op.
    fn write_word(
        &mut self,
        _ctx: ContextId,
        _addr: Felt,
        _clk: RowIndex,
        _word: Word,
        _err_ctx: &impl ErrorContext,
    ) -> Result<(), crate::MemoryError> {
        Ok(())
    }
}

// ADVICE REPLAY
// ================================================================================================

/// Implements a shim for the advice provider, in which all advice provider operations during a
/// given fragment are pre-recorded by the fast processor.
///
/// This is used to simulate advice provider interactions in parallel trace generation without
/// needing to actually access the advice provider. All advice provider operations are recorded
/// during fast execution and then replayed during parallel trace generation.
///
/// The shim records all operations with their results, and provides replay methods that return
/// the pre-recorded results. This works naturally since the fast processor has exactly the same
/// access patterns as the main trace generators (which re-execute part of the program).
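///
/// # Example
///
/// An illustrative sketch (not compiled): a value popped from the advice stack during fast
/// execution is replayed verbatim during trace generation.
///
/// ```ignore
/// let mut replay = AdviceReplay::default();
/// replay.record_pop_stack(Felt::new(3));
/// assert_eq!(replay.replay_pop_stack(), Felt::new(3));
/// ```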
#[derive(Debug, Default)]
pub struct AdviceReplay {
    // Stack operations
    stack_pops: VecDeque<Felt>,
    stack_word_pops: VecDeque<Word>,
    stack_dword_pops: VecDeque<[Word; 2]>,
}

impl AdviceReplay {
    // MUTATIONS (populated by the fast processor)
    // --------------------------------------------------------------------------------

    /// Records the value returned by a pop_stack operation.
    pub fn record_pop_stack(&mut self, value: Felt) {
        self.stack_pops.push_back(value);
    }

    /// Records the word returned by a pop_stack_word operation.
    pub fn record_pop_stack_word(&mut self, word: Word) {
        self.stack_word_pops.push_back(word);
    }

    /// Records the double word returned by a pop_stack_dword operation.
    pub fn record_pop_stack_dword(&mut self, dword: [Word; 2]) {
        self.stack_dword_pops.push_back(dword);
    }

    // ACCESSORS (used during parallel trace generation)
    // --------------------------------------------------------------------------------

    /// Replays a pop_stack operation, returning the previously recorded value.
    pub fn replay_pop_stack(&mut self) -> Felt {
        self.stack_pops.pop_front().expect("No stack pop operations recorded")
    }

    /// Replays a pop_stack_word operation, returning the previously recorded word.
    pub fn replay_pop_stack_word(&mut self) -> Word {
        self.stack_word_pops.pop_front().expect("No stack word pop operations recorded")
    }

    /// Replays a pop_stack_dword operation, returning the previously recorded double word.
    pub fn replay_pop_stack_dword(&mut self) -> [Word; 2] {
        self.stack_dword_pops
            .pop_front()
            .expect("No stack dword pop operations recorded")
    }
}

impl AdviceProviderInterface for AdviceReplay {
    fn pop_stack(&mut self) -> Result<Felt, AdviceError> {
        Ok(self.replay_pop_stack())
    }

    fn pop_stack_word(&mut self) -> Result<Word, AdviceError> {
        Ok(self.replay_pop_stack_word())
    }

    fn pop_stack_dword(&mut self) -> Result<[Word; 2], AdviceError> {
        Ok(self.replay_pop_stack_dword())
    }

    /// Returns `None`, as Merkle paths are ignored in parallel trace generation.
    fn get_merkle_path(
        &self,
        _root: Word,
        _depth: &Felt,
        _index: &Felt,
    ) -> Result<Option<MerklePath>, AdviceError> {
        Ok(None)
    }

    /// Returns `None`, as Merkle paths are ignored in parallel trace generation.
    fn update_merkle_node(
        &mut self,
        _root: Word,
        _depth: &Felt,
        _index: &Felt,
        _value: Word,
    ) -> Result<Option<MerklePath>, AdviceError> {
        Ok(None)
    }
}

// HASHER REPLAY
// ================================================================================================

/// Implements a shim for the hasher chiplet, in which all hasher operations during a given
/// fragment are pre-recorded by the fast processor.
///
/// This is used to simulate hasher operations in parallel trace generation without needing
/// to actually perform hash computations. All hasher operations are recorded during fast
/// execution and then replayed during parallel trace generation.
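///
/// # Example
///
/// An illustrative sketch (not compiled): a permutation's address and output state are recorded
/// once and replayed once, in FIFO order.
///
/// ```ignore
/// let mut replay = HasherReplay::default();
/// let output_state = [ZERO; 12]; // placeholder output of the permutation
/// replay.record_permute(Felt::new(8), output_state);
/// assert_eq!(replay.replay_permute(), (Felt::new(8), output_state));
/// ```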
#[derive(Debug, Default)]
pub struct HasherReplay {
    /// Recorded hasher addresses from operations like hash_control_block, hash_basic_block, etc.
    block_addresses: VecDeque<Felt>,

    /// Recorded hasher operations from permutation operations (HPerm).
    ///
    /// Each entry contains (address, output_state).
    permutation_operations: VecDeque<(Felt, [Felt; 12])>,

    /// Recorded hasher operations from Merkle path verification operations.
    ///
    /// Each entry contains (address, computed_root).
    build_merkle_root_operations: VecDeque<(Felt, Word)>,

    /// Recorded hasher operations from Merkle root update operations.
    ///
    /// Each entry contains (address, old_root, new_root).
    mrupdate_operations: VecDeque<(Felt, Word, Word)>,
}

impl HasherReplay {
    // MUTATIONS (populated by the fast processor)
    // --------------------------------------------------------------------------------------------

    /// Records the address associated with a `Hasher::hash_control_block` or
    /// `Hasher::hash_basic_block` operation.
    pub fn record_block_address(&mut self, addr: Felt) {
        self.block_addresses.push_back(addr);
    }

    /// Records a `Hasher::permute` operation with its address and result (after applying the
    /// permutation).
    pub fn record_permute(&mut self, addr: Felt, hashed_state: [Felt; 12]) {
        self.permutation_operations.push_back((addr, hashed_state));
    }

    /// Records a Merkle path verification with its address and computed root.
    pub fn record_build_merkle_root(&mut self, addr: Felt, computed_root: Word) {
        self.build_merkle_root_operations.push_back((addr, computed_root));
    }

    /// Records a Merkle root update with its address, old root, and new root.
    pub fn record_update_merkle_root(&mut self, addr: Felt, old_root: Word, new_root: Word) {
        self.mrupdate_operations.push_back((addr, old_root, new_root));
    }

    // ACCESSORS (used by parallel trace generators)
    // --------------------------------------------------------------------------------------------

    /// Replays a `Hasher::hash_control_block` or `Hasher::hash_basic_block` operation, returning
    /// the pre-recorded address.
    pub fn replay_block_address(&mut self) -> Felt {
        self.block_addresses.pop_front().expect("No block address operations recorded")
    }

    /// Replays a `Hasher::permute` operation, returning its address and result.
    pub fn replay_permute(&mut self) -> (Felt, [Felt; 12]) {
        self.permutation_operations
            .pop_front()
            .expect("No permutation operations recorded")
    }

    /// Replays a Merkle path verification, returning the pre-recorded address and computed root.
    pub fn replay_build_merkle_root(&mut self) -> (Felt, Word) {
        self.build_merkle_root_operations
            .pop_front()
            .expect("No build merkle root operations recorded")
    }

    /// Replays a Merkle root update, returning the pre-recorded address, old root, and new root.
    pub fn replay_update_merkle_root(&mut self) -> (Felt, Word, Word) {
        self.mrupdate_operations.pop_front().expect("No mrupdate operations recorded")
    }
}

impl HasherInterface for HasherReplay {
    fn permute(&mut self, _state: HasherState) -> (Felt, HasherState) {
        self.replay_permute()
    }

    fn verify_merkle_root(
        &mut self,
        claimed_root: Word,
        _value: Word,
        _path: Option<&MerklePath>,
        _index: Felt,
        on_err: impl FnOnce() -> ExecutionError,
    ) -> Result<Felt, ExecutionError> {
        let (addr, computed_root) = self.replay_build_merkle_root();
        if claimed_root == computed_root {
            Ok(addr)
        } else {
            // If the hasher didn't compute the same root (using the same path), then the claimed
            // value is not the value currently in the tree at the given index
            Err(on_err())
        }
    }

    fn update_merkle_root(
        &mut self,
        claimed_old_root: Word,
        _old_value: Word,
        _new_value: Word,
        _path: Option<&MerklePath>,
        _index: Felt,
        on_err: impl FnOnce() -> ExecutionError,
    ) -> Result<(Felt, Word), ExecutionError> {
        let (address, old_root, new_root) = self.replay_update_merkle_root();

        if claimed_old_root == old_root {
            Ok((address, new_root))
        } else {
            Err(on_err())
        }
    }
}

// STACK OVERFLOW REPLAY
// ================================================================================================

/// Implements a shim for stack overflow operations, in which all overflow values and addresses
/// during a given fragment are pre-recorded by the fast processor and replayed by the main trace
/// fragment generators.
///
/// This is used to simulate stack overflow functionality in parallel trace generation without
/// needing to maintain the actual overflow table. All overflow operations are recorded during
/// fast execution and then replayed during parallel trace generation.
///
/// The shim records overflow values (from pop operations) and overflow addresses (representing
/// the clock cycle of the last overflow update) and provides replay methods that return the
/// pre-recorded values. This works naturally since the fast processor has exactly the same
/// access patterns as the main trace generators.
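///
/// # Example
///
/// An illustrative sketch (not compiled), matching the table in the `overflow_values` field docs:
///
/// ```ignore
/// let mut replay = StackOverflowReplay::new();
/// replay.record_pop_overflow(Felt::new(7), Felt::new(16));
/// assert_eq!(replay.replay_pop_overflow(), (Felt::new(7), Felt::new(16)));
/// ```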
#[derive(Debug)]
pub struct StackOverflowReplay {
    /// Recorded overflow values and overflow addresses from pop_overflow operations. Each entry
    /// represents a value that was popped from the overflow stack, and the overflow address of the
    /// entry at the top of the overflow stack *after* the pop operation.
    ///
    /// For example, given the following table:
    ///
    /// | Overflow Value | Overflow Address |
    /// |----------------|------------------|
    /// |       8        |        14        |
    /// |       2        |        16        |
    /// |       7        |        18        |
    ///
    /// a `pop_overflow()` operation would return `(popped_value, prev_addr) = (7, 16)`.
    overflow_values: VecDeque<(Felt, Felt)>,

    /// Recorded (stack depth, overflow address) pairs returned when restoring a context.
    restore_context_info: VecDeque<(usize, Felt)>,
}

impl Default for StackOverflowReplay {
    fn default() -> Self {
        Self::new()
    }
}

impl StackOverflowReplay {
    /// Creates a new StackOverflowReplay with empty operation queues.
    pub fn new() -> Self {
        Self {
            overflow_values: VecDeque::new(),
            restore_context_info: VecDeque::new(),
        }
    }

    // MUTATORS
    // --------------------------------------------------------------------------------

    /// Records the value returned by a pop_overflow operation, along with the overflow address
    /// stored in the overflow table *after* the pop. That is, `new_overflow_addr` represents the
    /// clock cycle at which the value *before* `value` was added to the overflow table. See the
    /// docstring for the `overflow_values` field for more information.
    ///
    /// This *must* only be called if there is an actual value in the overflow table to pop; that
    /// is, don't call it if the stack depth is 16.
    pub fn record_pop_overflow(&mut self, value: Felt, new_overflow_addr: Felt) {
        self.overflow_values.push_back((value, new_overflow_addr));
    }

    /// Records the stack depth and overflow address restored when restoring a context.
    pub fn record_restore_context_overflow_addr(&mut self, stack_depth: usize, addr: Felt) {
        self.restore_context_info.push_back((stack_depth, addr));
    }

    // ACCESSORS
    // --------------------------------------------------------------------------------

    /// Replays a pop_overflow operation, returning the previously recorded value and
    /// `new_overflow_addr`.
    ///
    /// This *must* only be called if there is an actual value in the overflow table to pop; that
    /// is, don't call it if the stack depth is 16.
    ///
    /// See [Self::record_pop_overflow] for more details.
    pub fn replay_pop_overflow(&mut self) -> (Felt, Felt) {
        self.overflow_values.pop_front().expect("No overflow pop operations recorded")
    }

    /// Replays the stack depth and overflow address recorded when restoring a context.
    pub fn replay_restore_context_overflow_addr(&mut self) -> (usize, Felt) {
        self.restore_context_info
            .pop_front()
            .expect("No overflow address operations recorded")
    }
}

// NODE EXECUTION STATE
// ================================================================================================

/// Specifies the execution state of a node.
///
/// Each MAST node has at least 2 different states associated with it: processing its START and
/// END operations (e.g. JOIN and END in the case of [miden_core::mast::JoinNode]). Some have
/// more; for example, [miden_core::mast::BasicBlockNode] has BASIC BLOCK and END, in addition to
/// one state for each operation in the basic block. Since a trace fragment can begin at any clock
/// cycle (fragments are [super::NUM_ROWS_PER_CORE_FRAGMENT] rows long), specifying which MAST
/// node we're executing is insufficient; we also have to specify *at what point* during the
/// execution of that node we are. This is the information that this type encodes.
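///
/// # Example
///
/// An illustrative sketch (not compiled): resuming a fragment in the middle of a basic block, at
/// the third operation of the second batch.
///
/// ```ignore
/// // `node_id` is some MastNodeId obtained from the MAST forest (placeholder here)
/// let state = NodeExecutionState::BasicBlock {
///     node_id,
///     batch_index: 1,
///     op_idx_in_batch: 2,
/// };
/// ```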
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum NodeExecutionState {
    /// Resume execution within a basic block at a specific batch and operation index.
    /// This is used when continuing execution mid-way through a basic block.
    BasicBlock {
        /// Node ID of the basic block being executed
        node_id: MastNodeId,
        /// Index of the operation batch within the basic block
        batch_index: usize,
        /// Index of the operation within the batch
        op_idx_in_batch: usize,
    },
    /// Execute a control flow node (JOIN, SPLIT, LOOP, etc.) from the start. This is used when
    /// beginning execution of a control flow construct.
    Start(MastNodeId),
    /// Execute a RESPAN for the specified batch within the specified basic block.
    Respan {
        /// Node ID of the basic block being executed
        node_id: MastNodeId,
        /// Index of the operation batch within the basic block
        batch_index: usize,
    },
    /// Execute a LOOP node, starting at a REPEAT operation.
    LoopRepeat(MastNodeId),
    /// Execute the END phase of a control flow node (JOIN, SPLIT, LOOP, etc.).
    /// This is used when completing execution of a control flow construct.
    End(MastNodeId),
}