miden-processor 0.22.1

Miden VM processor
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
//! Trace row construction: builds the system, decoder, and stack columns for each row of the
//! core execution trace.

use miden_air::trace::{
    CLK_COL_IDX, CTX_COL_IDX, DECODER_TRACE_OFFSET, FN_HASH_OFFSET, STACK_TRACE_OFFSET,
    SYS_TRACE_WIDTH,
    chiplets::hasher::HASH_CYCLE_LEN_FELT,
    decoder::{
        ADDR_COL_IDX, GROUP_COUNT_COL_IDX, HASHER_STATE_OFFSET, IN_SPAN_COL_IDX,
        NUM_OP_BATCH_FLAGS, NUM_OP_BITS, NUM_USER_OP_HELPERS, OP_BATCH_FLAGS_OFFSET,
        OP_BITS_EXTRA_COLS_OFFSET, OP_BITS_OFFSET, OP_INDEX_COL_IDX,
    },
    stack::{B0_COL_IDX, B1_COL_IDX, H0_COL_IDX, STACK_TOP_OFFSET, STACK_TOP_RANGE},
};
use miden_core::{
    Felt, ONE, Word, ZERO,
    mast::{
        BasicBlockNode, CallNode, JoinNode, LoopNode, MastForest, MastNodeExt, OpBatch, SplitNode,
    },
    operations::{Operation, opcodes},
};

use super::{ExecutionContextInfo, StackState, SystemState, get_node_in_forest};
use crate::{
    ExecutionError,
    trace::parallel::{core_trace_fragment::BasicBlockContext, tracer::CoreTraceGenerationTracer},
};

// DECODER ROW
// ================================================================================================

/// The data necessary to build the decoder part of a trace row.
///
/// A single instance describes one decoder row: a control-flow row (e.g. JOIN/SPLIT/CALL start,
/// or END), a batch-start row (SPAN/RESPAN), or a row for a user operation executed inside a
/// basic block.
#[derive(Debug)]
struct DecoderRow {
    /// The block address written into the decoder's address column.
    pub addr: Felt,
    /// The opcode whose bits are decomposed into the decoder's op-bit columns.
    pub opcode: u8,
    /// The 8 hasher-state values, split into two words (hasher[0..4] and hasher[4..8]).
    pub hasher_state: (Word, Word),
    /// Whether this row is an operation within a basic block; drives the in-span column.
    pub in_basic_block: bool,
    /// The value written into the group-count column (0 for control-flow rows).
    pub group_count: Felt,
    /// The index of the operation within its operation group, or 0 if this is not a row containing
    /// an operation in a basic block.
    pub op_index: Felt,
    /// The operation batch flags, encoding the number of groups present in the current operation
    /// batch. All-zero except on SPAN/RESPAN rows.
    pub op_batch_flags: [Felt; NUM_OP_BATCH_FLAGS],
}

impl DecoderRow {
    /// Creates a new `DecoderRow` for control flow operations (e.g. JOIN/SPLIT start, or END).
    ///
    /// Control flow operations never occur inside a basic block, so all span-related fields
    /// (in-span flag, group count, op index, batch flags) are zeroed out.
    pub fn new_control_flow(opcode: u8, hasher_state: (Word, Word), addr: Felt) -> Self {
        Self {
            addr,
            opcode,
            hasher_state,
            in_basic_block: false,
            group_count: ZERO,
            op_index: ZERO,
            op_batch_flags: [ZERO; NUM_OP_BATCH_FLAGS],
        }
    }

    /// Creates a new `DecoderRow` for the start of a new batch in a basic block.
    ///
    /// This corresponds either to the SPAN or RESPAN operations. The hasher state is loaded
    /// with the batch's 8 operation groups, and the batch flags encode `group_count`.
    ///
    /// # Errors
    /// Returns an error if `group_count` does not map to a valid batch size (1, 2, 4, or 8
    /// groups after clamping).
    pub fn new_basic_block_batch(
        start_op: BasicBlockStartOperation,
        op_batch: &OpBatch,
        addr: Felt,
        group_count: Felt,
    ) -> Result<Self, ExecutionError> {
        let op_batch_flags = get_op_batch_flags(group_count)?;

        let opcode = match start_op {
            BasicBlockStartOperation::Span => opcodes::SPAN,
            BasicBlockStartOperation::Respan => opcodes::RESPAN,
        };

        // The batch's 8 op groups fill the full hasher state (two words).
        let groups = op_batch.groups();
        let first_word: Word = groups[0..4].try_into().expect("slice with incorrect length");
        let second_word: Word = groups[4..8].try_into().expect("slice with incorrect length");

        Ok(Self {
            addr,
            opcode,
            hasher_state: (first_word, second_word),
            in_basic_block: false,
            group_count,
            op_index: ZERO,
            op_batch_flags,
        })
    }

    /// Creates a new `DecoderRow` for an operation within a basic block.
    ///
    /// The hasher state carries the current op group and parent address followed by the six
    /// user-op helper registers.
    pub fn new_operation(
        operation: Operation,
        current_addr: Felt,
        parent_addr: Felt,
        op_idx_in_group: usize,
        basic_block_ctx: &BasicBlockContext,
        user_op_helpers: [Felt; NUM_USER_OP_HELPERS],
    ) -> Self {
        let first_word: Word = [
            basic_block_ctx.current_op_group,
            parent_addr,
            user_op_helpers[0],
            user_op_helpers[1],
        ]
        .into();
        let second_word: Word =
            [user_op_helpers[2], user_op_helpers[3], user_op_helpers[4], user_op_helpers[5]].into();

        Self {
            addr: current_addr,
            opcode: operation.op_code(),
            hasher_state: (first_word, second_word),
            in_basic_block: true,
            group_count: basic_block_ctx.group_count_in_block,
            op_index: Felt::from_u32(op_idx_in_group as u32),
            op_batch_flags: [ZERO; NUM_OP_BATCH_FLAGS],
        }
    }
}

/// Enum representing the type of operation that starts a basic block.
///
/// `Span` starts the first operation batch of a basic block; `Respan` starts each subsequent
/// batch within the same block.
#[derive(Debug)]
enum BasicBlockStartOperation {
    /// The SPAN operation: begins execution of a basic block.
    Span,
    /// The RESPAN operation: begins a new operation batch within the current basic block.
    Respan,
}

// BASIC BLOCK TRACE ROW METHODS
// ================================================================================================

impl CoreTraceGenerationTracer<'_> {
    /// Fills a trace row for SPAN start operation to the main trace fragment.
    ///
    /// This method creates a trace row that corresponds to the SPAN operation that starts
    /// a basic block execution.
    ///
    /// # Errors
    /// Returns an error if the basic block has no op batches, or if its group count does not
    /// map to a valid batch size.
    pub fn fill_basic_block_start_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        basic_block_node: &BasicBlockNode,
    ) -> Result<(), ExecutionError> {
        let group_count_for_block = Felt::from_u32(basic_block_node.num_op_groups() as u32);
        let first_op_batch = basic_block_node
            .op_batches()
            .first()
            .ok_or(ExecutionError::Internal("basic block should have at least one op batch"))?;

        let decoder_row = DecoderRow::new_basic_block_batch(
            BasicBlockStartOperation::Span,
            first_op_batch,
            self.decoder_state.parent_addr,
            group_count_for_block,
        )?;
        self.fill_trace_row(system, stack, decoder_row);
        Ok(())
    }

    /// Fills a trace row for SPAN end operation to the main trace fragment.
    ///
    /// This method creates a trace row that corresponds to the END operation that completes
    /// a basic block execution.
    ///
    /// # Errors
    /// Propagates any error from replaying the node end off the block stack.
    pub fn fill_basic_block_end_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        basic_block_node: &BasicBlockNode,
    ) -> Result<(), ExecutionError> {
        let (ended_node_addr, flags) =
            self.decoder_state.replay_node_end(&mut self.block_stack_replay)?;

        let decoder_row = DecoderRow::new_control_flow(
            opcodes::END,
            (basic_block_node.digest(), flags.to_hasher_state_second_word()),
            ended_node_addr,
        );

        self.fill_trace_row(system, stack, decoder_row);
        Ok(())
    }

    // RESPAN
    // -------------------------------------------------------------------------------------------

    /// Processes a RESPAN operation that starts processing of a new operation batch within
    /// the same basic block.
    ///
    /// This method updates the processor state and adds a corresponding trace row
    /// to the main trace fragment.
    ///
    /// # Errors
    /// Returns an error if the remaining group count does not map to a valid batch size.
    pub fn fill_respan_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        op_batch: &OpBatch,
        basic_block_context: &mut BasicBlockContext,
    ) -> Result<(), ExecutionError> {
        // Add RESPAN trace row
        {
            let decoder_row = DecoderRow::new_basic_block_batch(
                BasicBlockStartOperation::Respan,
                op_batch,
                self.decoder_state.current_addr,
                basic_block_context.group_count_in_block,
            )?;
            self.fill_trace_row(system, stack, decoder_row);
        }

        // Update block address for the upcoming block
        self.decoder_state.current_addr += HASH_CYCLE_LEN_FELT;

        // Update basic block context: the RESPAN itself consumes one group, and the first
        // group of the new batch becomes the current op group.
        basic_block_context.group_count_in_block -= ONE;
        basic_block_context.current_op_group = op_batch.groups()[0];

        Ok(())
    }

    /// Writes a trace row for an operation within a basic block.
    ///
    /// This must be called *after* the operation has been executed and the
    /// stack has been updated.
    pub fn fill_operation_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        operation: Operation,
        op_idx_in_group: usize,
        user_op_helpers: [Felt; NUM_USER_OP_HELPERS],
        basic_block_context: &mut BasicBlockContext,
    ) {
        // update operations left to be executed in the group
        basic_block_context.remove_operation_from_current_op_group();

        // Add trace row
        let decoder_row = DecoderRow::new_operation(
            operation,
            self.decoder_state.current_addr,
            self.decoder_state.parent_addr,
            op_idx_in_group,
            basic_block_context,
            user_op_helpers,
        );
        self.fill_trace_row(system, stack, decoder_row);
    }
}

// CONTROL FLOW TRACE ROW METHODS
// ================================================================================================

impl CoreTraceGenerationTracer<'_> {
    // CALL operations
    // -------------------------------------------------------------------------------------------

    /// Fills a trace row for the start of a CALL/SYSCALL operation.
    ///
    /// # Errors
    /// Returns an error if the callee node cannot be found in `current_forest`.
    pub fn fill_call_start_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        call_node: &CallNode,
        current_forest: &MastForest,
    ) -> Result<(), ExecutionError> {
        // For CALL/SYSCALL operations, the hasher state in start operations contains the callee
        // hash in the first half, and zeros in the second half (since CALL only has one
        // child)
        let callee_hash: Word = get_node_in_forest(current_forest, call_node.callee())?.digest();
        let zero_hash = Word::default();

        let decoder_row = DecoderRow::new_control_flow(
            if call_node.is_syscall() {
                opcodes::SYSCALL
            } else {
                opcodes::CALL
            },
            (callee_hash, zero_hash),
            self.decoder_state.parent_addr,
        );

        self.fill_trace_row(system, stack, decoder_row);
        Ok(())
    }

    // DYN operations
    // -------------------------------------------------------------------------------------------

    /// Fills a trace row for the start of a DYN operation.
    ///
    /// The hasher state carries the dynamically resolved callee hash in its first half.
    pub fn fill_dyn_start_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        callee_hash: Word,
    ) {
        let decoder_row = DecoderRow::new_control_flow(
            opcodes::DYN,
            (callee_hash, Word::default()),
            self.decoder_state.parent_addr,
        );
        self.fill_trace_row(system, stack, decoder_row)
    }

    /// Fills a trace row for the start of a DYNCALL operation.
    ///
    /// The decoder hasher trace columns are populated with the callee hash, as well as the stack
    /// helper registers (specifically their state after shifting the stack left). We need to store
    /// those in the decoder trace so that the block stack table can access them (since in the next
    /// row, we start a new context, and hence the stack registers are reset to their default
    /// values).
    pub fn fill_dyncall_start_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        callee_hash: Word,
        ctx_info: ExecutionContextInfo,
    ) {
        let second_hasher_state: Word = [
            Felt::from_u32(ctx_info.parent_stack_depth),
            ctx_info.parent_next_overflow_addr,
            ZERO,
            ZERO,
        ]
        .into();

        let decoder_row = DecoderRow::new_control_flow(
            opcodes::DYNCALL,
            (callee_hash, second_hasher_state),
            self.decoder_state.parent_addr,
        );
        self.fill_trace_row(system, stack, decoder_row)
    }

    // JOIN operations
    // -------------------------------------------------------------------------------------------

    /// Fills a trace row for starting a JOIN operation to the main trace fragment.
    ///
    /// # Errors
    /// Returns an error if either child node cannot be found in `current_forest`.
    pub fn fill_join_start_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        join_node: &JoinNode,
        current_forest: &MastForest,
    ) -> Result<(), ExecutionError> {
        // Get the child hashes for the hasher state
        let child1_hash: Word = get_node_in_forest(current_forest, join_node.first())?.digest();
        let child2_hash: Word = get_node_in_forest(current_forest, join_node.second())?.digest();

        let decoder_row = DecoderRow::new_control_flow(
            opcodes::JOIN,
            (child1_hash, child2_hash),
            self.decoder_state.parent_addr,
        );

        self.fill_trace_row(system, stack, decoder_row);
        Ok(())
    }

    // LOOP operations
    // -------------------------------------------------------------------------------------------

    /// Fills a trace row for the start of a LOOP operation.
    ///
    /// # Errors
    /// Returns an error if the loop body node cannot be found in `current_forest`.
    pub fn fill_loop_start_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        loop_node: &LoopNode,
        current_forest: &MastForest,
    ) -> Result<(), ExecutionError> {
        // For LOOP operations, the hasher state in start operations contains the loop body hash in
        // the first half.
        let body_hash: Word = get_node_in_forest(current_forest, loop_node.body())?.digest();
        let zero_hash = Word::default();

        let decoder_row = DecoderRow::new_control_flow(
            opcodes::LOOP,
            (body_hash, zero_hash),
            self.decoder_state.parent_addr,
        );

        self.fill_trace_row(system, stack, decoder_row);
        Ok(())
    }

    /// Fills a trace row for the start of a REPEAT operation.
    ///
    /// # Errors
    /// Returns an error if the loop body node cannot be found in `current_forest`.
    pub fn fill_loop_repeat_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        loop_node: &LoopNode,
        current_forest: &MastForest,
        current_addr: Felt,
    ) -> Result<(), ExecutionError> {
        // For REPEAT operations, the hasher state in start operations contains the loop body hash
        // in the first half.
        let body_hash: Word = get_node_in_forest(current_forest, loop_node.body())?.digest();

        let decoder_row = DecoderRow::new_control_flow(
            opcodes::REPEAT,
            // We set hasher[4] (is_loop_body) to 1
            (body_hash, [ONE, ZERO, ZERO, ZERO].into()),
            current_addr,
        );

        self.fill_trace_row(system, stack, decoder_row);
        Ok(())
    }

    // SPLIT operations
    // -------------------------------------------------------------------------------------------

    /// Fills a trace row for the start of a SPLIT operation.
    ///
    /// # Errors
    /// Returns an error if either branch node cannot be found in `current_forest`.
    pub fn fill_split_start_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        split_node: &SplitNode,
        current_forest: &MastForest,
    ) -> Result<(), ExecutionError> {
        // Get the child hashes for the hasher state
        let on_true_hash: Word = get_node_in_forest(current_forest, split_node.on_true())?.digest();
        let on_false_hash: Word =
            get_node_in_forest(current_forest, split_node.on_false())?.digest();

        let decoder_row = DecoderRow::new_control_flow(
            opcodes::SPLIT,
            (on_true_hash, on_false_hash),
            self.decoder_state.parent_addr,
        );

        self.fill_trace_row(system, stack, decoder_row);
        Ok(())
    }

    /// Fills a trace row for the end of a control block.
    ///
    /// This method also updates the decoder state by popping the block from the stack.
    ///
    /// # Errors
    /// Propagates any error from replaying the node end off the block stack.
    pub fn fill_end_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        node_digest: Word,
    ) -> Result<(), ExecutionError> {
        // Pop the block from stack and use its info for END operations
        let (ended_node_addr, flags) =
            self.decoder_state.replay_node_end(&mut self.block_stack_replay)?;

        let decoder_row = DecoderRow::new_control_flow(
            opcodes::END,
            (node_digest, flags.to_hasher_state_second_word()),
            ended_node_addr,
        );

        self.fill_trace_row(system, stack, decoder_row);
        Ok(())
    }
}

// HELPER METHODS
// ================================================================================================

impl CoreTraceGenerationTracer<'_> {
    /// Fills a single trace row (system, decoder, and stack columns) at the current row write
    /// index, and then advances the index.
    ///
    /// This is the shared implementation used by all row-filling methods. The row-specific
    /// decoder data is provided via `decoder_row`, while the system and stack columns are
    /// derived from the provided states.
    fn fill_trace_row(
        &mut self,
        system: &SystemState,
        stack: &StackState,
        decoder_row: DecoderRow,
    ) {
        // System trace columns (identical for all control flow operations)
        self.populate_system_trace_columns(system, self.row_write_index);

        // Decoder trace columns
        self.populate_decoder_trace_columns(self.row_write_index, &decoder_row);

        // Stack trace columns (identical for all control flow operations)
        self.populate_stack_trace_columns(stack, self.row_write_index);

        // Increment the row write index
        self.row_write_index += 1;
    }

    /// Populates the system trace columns.
    ///
    /// System columns are written with a one-call lag: the state captured by this call is only
    /// buffered here, and is written into the trace by the *next* call. The buffered clock value
    /// is `clk + 1`.
    // NOTE(review): the one-row stagger and the `clk + 1` suggest each trace row records the
    // system state *after* the preceding operation — confirm against the trace layout spec.
    fn populate_system_trace_columns(&mut self, system: &SystemState, row_idx: usize) {
        // If we have buffered system rows from the previous call, write them to the trace
        if let Some(system_rows) = self.system_cols {
            // Write buffered system rows to the trace at current row
            for (i, &value) in system_rows.iter().enumerate() {
                self.fragment.columns[i][row_idx] = value;
            }
        }

        // Now populate the buffer with current system state for the next row
        let mut new_system_rows = [ZERO; SYS_TRACE_WIDTH];

        new_system_rows[CLK_COL_IDX] = (system.clk + 1).into();
        new_system_rows[CTX_COL_IDX] = system.ctx.into();
        new_system_rows[FN_HASH_OFFSET] = system.fn_hash[0];
        new_system_rows[FN_HASH_OFFSET + 1] = system.fn_hash[1];
        new_system_rows[FN_HASH_OFFSET + 2] = system.fn_hash[2];
        new_system_rows[FN_HASH_OFFSET + 3] = system.fn_hash[3];

        // Store the buffer for the next call
        self.system_cols = Some(new_system_rows);
    }

    /// Populates the decoder trace columns with operation-specific data.
    ///
    /// Unlike the system and stack columns, the decoder columns are written directly at
    /// `row_idx` with no buffering.
    fn populate_decoder_trace_columns(&mut self, row_idx: usize, row: &DecoderRow) {
        // Block address
        self.fragment.columns[DECODER_TRACE_OFFSET + ADDR_COL_IDX][row_idx] = row.addr;

        // Decompose operation into bits (least significant bit first)
        let opcode = row.opcode;
        for i in 0..NUM_OP_BITS {
            let bit = Felt::from_u8((opcode >> i) & 1);
            self.fragment.columns[DECODER_TRACE_OFFSET + OP_BITS_OFFSET + i][row_idx] = bit;
        }

        // Hasher state
        let (first_hash, second_hash) = row.hasher_state;
        self.fragment.columns[DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET][row_idx] = first_hash[0]; // hasher[0]
        self.fragment.columns[DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + 1][row_idx] =
            first_hash[1]; // hasher[1]
        self.fragment.columns[DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + 2][row_idx] =
            first_hash[2]; // hasher[2]
        self.fragment.columns[DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + 3][row_idx] =
            first_hash[3]; // hasher[3]
        self.fragment.columns[DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + 4][row_idx] =
            second_hash[0]; // hasher[4]
        self.fragment.columns[DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + 5][row_idx] =
            second_hash[1]; // hasher[5]
        self.fragment.columns[DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + 6][row_idx] =
            second_hash[2]; // hasher[6]
        self.fragment.columns[DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + 7][row_idx] =
            second_hash[3]; // hasher[7]

        // Remaining decoder trace columns (identical for all control flow operations)
        self.fragment.columns[DECODER_TRACE_OFFSET + OP_INDEX_COL_IDX][row_idx] = row.op_index;
        self.fragment.columns[DECODER_TRACE_OFFSET + GROUP_COUNT_COL_IDX][row_idx] =
            row.group_count;
        self.fragment.columns[DECODER_TRACE_OFFSET + IN_SPAN_COL_IDX][row_idx] =
            if row.in_basic_block { ONE } else { ZERO };

        // Batch flag columns - all 0 for control flow operations
        for i in 0..NUM_OP_BATCH_FLAGS {
            self.fragment.columns[DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + i][row_idx] =
                row.op_batch_flags[i];
        }

        // Extra bit columns: degree-lowering helpers derived from the high op bits
        let bit6 = self.fragment.columns[DECODER_TRACE_OFFSET + OP_BITS_OFFSET + 6][row_idx];
        let bit5 = self.fragment.columns[DECODER_TRACE_OFFSET + OP_BITS_OFFSET + 5][row_idx];
        let bit4 = self.fragment.columns[DECODER_TRACE_OFFSET + OP_BITS_OFFSET + 4][row_idx];
        self.fragment.columns[DECODER_TRACE_OFFSET + OP_BITS_EXTRA_COLS_OFFSET][row_idx] =
            bit6 * (ONE - bit5) * bit4;
        self.fragment.columns[DECODER_TRACE_OFFSET + OP_BITS_EXTRA_COLS_OFFSET + 1][row_idx] =
            bit6 * bit5;
    }

    /// Populates the stack trace columns.
    ///
    /// Like the system columns, the stack columns are written with a one-call lag: the state
    /// captured by this call is buffered and written into the trace by the *next* call.
    fn populate_stack_trace_columns(&mut self, stack: &StackState, row_idx: usize) {
        use miden_air::trace::STACK_TRACE_WIDTH;

        // If we have buffered stack rows from the previous call, write them to the trace
        if let Some(stack_rows) = self.stack_cols {
            // Write buffered stack rows to the trace at current row
            for (i, &value) in stack_rows.iter().enumerate() {
                self.fragment.columns[STACK_TRACE_OFFSET + i][row_idx] = value;
            }
        }

        // Now populate the buffer with current stack state for the next row
        let mut new_stack_rows = [ZERO; STACK_TRACE_WIDTH];

        // Stack top (16 elements)
        for i in STACK_TOP_RANGE {
            new_stack_rows[STACK_TOP_OFFSET + i] = stack.get(i);
        }

        // Stack helpers (b0, b1, h0)
        // Note: H0 will be inverted using batch inversion later
        new_stack_rows[B0_COL_IDX] = Felt::new(stack.stack_depth() as u64); // b0
        new_stack_rows[B1_COL_IDX] = stack.overflow_addr(); // b1
        new_stack_rows[H0_COL_IDX] = stack.overflow_helper(); // h0

        // Store the buffer for the next call
        self.stack_cols = Some(new_stack_rows);
    }
}

// HELPERS
// ===============================================================================================

/// Returns op batch flags for the specified group count.
fn get_op_batch_flags(num_groups_left: Felt) -> Result<[Felt; 3], ExecutionError> {
    use miden_air::trace::decoder::{
        OP_BATCH_1_GROUPS, OP_BATCH_2_GROUPS, OP_BATCH_4_GROUPS, OP_BATCH_8_GROUPS,
    };
    use miden_core::mast::OP_BATCH_SIZE;

    let num_groups = core::cmp::min(num_groups_left.as_canonical_u64() as usize, OP_BATCH_SIZE);
    match num_groups {
        8 => Ok(OP_BATCH_8_GROUPS),
        4 => Ok(OP_BATCH_4_GROUPS),
        2 => Ok(OP_BATCH_2_GROUPS),
        1 => Ok(OP_BATCH_1_GROUPS),
        _ => Err(ExecutionError::Internal(
            "invalid number of groups in a batch, must be 1, 2, 4, or 8",
        )),
    }
}