miden_processor/
lib.rs

1#![no_std]
2
3#[macro_use]
4extern crate alloc;
5
6#[cfg(feature = "std")]
7extern crate std;
8
9use alloc::{sync::Arc, vec::Vec};
10use core::fmt::{Display, LowerHex};
11
12use miden_air::trace::{
13    CHIPLETS_WIDTH, DECODER_TRACE_WIDTH, MIN_TRACE_LEN, RANGE_CHECK_TRACE_WIDTH, STACK_TRACE_WIDTH,
14    SYS_TRACE_WIDTH,
15};
16pub use miden_air::{ExecutionOptions, ExecutionOptionsError, RowIndex};
17pub use miden_core::{
18    AssemblyOp, EMPTY_WORD, Felt, Kernel, ONE, Operation, Program, ProgramInfo, QuadExtension,
19    StackInputs, StackOutputs, WORD_SIZE, Word, ZERO,
20    crypto::merkle::SMT_DEPTH,
21    errors::InputError,
22    mast::{MastForest, MastNode, MastNodeExt, MastNodeId},
23    precompile::{PrecompileRequest, PrecompileTranscriptState},
24    sys_events::SystemEvent,
25    utils::DeserializationError,
26};
27use miden_core::{
28    Decorator, FieldElement,
29    mast::{
30        BasicBlockNode, CallNode, DynNode, ExternalNode, JoinNode, LoopNode, OpBatch, SplitNode,
31    },
32};
33use miden_debug_types::SourceSpan;
34pub use winter_prover::matrix::ColMatrix;
35
36pub(crate) mod continuation_stack;
37
38pub mod fast;
39use fast::FastProcessState;
40pub mod parallel;
41pub(crate) mod processor;
42
43mod operations;
44
45mod system;
46pub use system::ContextId;
47use system::System;
48
49#[cfg(test)]
50mod test_utils;
51
52pub(crate) mod decoder;
53use decoder::Decoder;
54
55mod stack;
56use stack::Stack;
57
58mod range;
59use range::RangeChecker;
60
61mod host;
62
63pub use host::{
64    AdviceMutation, AsyncHost, BaseHost, FutureMaybeSend, MastForestStore, MemMastForestStore,
65    SyncHost,
66    advice::{AdviceError, AdviceInputs, AdviceProvider},
67    debug::DefaultDebugHandler,
68    default::{DefaultHost, HostLibrary},
69    handlers::{
70        AssertError, DebugError, DebugHandler, EventError, EventHandler, EventHandlerRegistry,
71        NoopEventHandler, TraceError,
72    },
73};
74
75mod chiplets;
76use chiplets::Chiplets;
77pub use chiplets::MemoryError;
78
79mod trace;
80use trace::TraceFragment;
81pub use trace::{ChipletsLengths, ExecutionTrace, NUM_RAND_ROWS, TraceLenSummary};
82
83mod errors;
84pub use errors::{ErrorContext, ErrorContextImpl, ExecutionError};
85
86pub mod utils;
87
88#[cfg(all(test, not(feature = "no_err_ctx")))]
89mod tests;
90
91mod debug;
92pub use debug::{AsmOpInfo, VmState, VmStateIterator};
93
94// RE-EXPORTS
95// ================================================================================================
96
/// Re-exports of the math primitives used by the processor.
pub mod math {
    pub use miden_core::{Felt, FieldElement, StarkField};
    pub use winter_prover::math::fft;
}
101
/// Re-exports of the cryptographic primitives (hashers, Merkle data structures, and random
/// coins) used by the processor.
pub mod crypto {
    pub use miden_core::crypto::{
        hash::{Blake3_192, Blake3_256, ElementHasher, Hasher, Poseidon2, Rpo256, Rpx256},
        merkle::{
            MerkleError, MerklePath, MerkleStore, MerkleTree, NodeIndex, PartialMerkleTree,
            SimpleSmt,
        },
        random::{RandomCoin, RpoRandomCoin, RpxRandomCoin, WinterRandomCoin},
    };
}
112
// TYPE ALIASES AND HELPER TYPES
// ================================================================================================
115
/// A typed wrapper around a `u32` VM memory address.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct MemoryAddress(u32);

impl From<u32> for MemoryAddress {
    fn from(addr: u32) -> Self {
        Self(addr)
    }
}

impl From<MemoryAddress> for u32 {
    fn from(value: MemoryAddress) -> Self {
        // unwrap the newtype via destructuring
        let MemoryAddress(raw) = value;
        raw
    }
}

impl Display for MemoryAddress {
    /// Formats the address exactly as its inner `u32`, forwarding all formatter flags
    /// (width, fill, alignment, etc.).
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        <u32 as Display>::fmt(&self.0, f)
    }
}

impl LowerHex for MemoryAddress {
    /// Formats the address as lowercase hex, forwarding all formatter flags (width, `#`, etc.).
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        <u32 as LowerHex>::fmt(&self.0, f)
    }
}

impl core::ops::Add<MemoryAddress> for MemoryAddress {
    type Output = Self;

    fn add(self, rhs: MemoryAddress) -> Self::Output {
        // delegate to the `Add<u32>` impl below; standard `u32` addition semantics apply
        // (panics on overflow in debug builds)
        self + rhs.0
    }
}

impl core::ops::Add<u32> for MemoryAddress {
    type Output = Self;

    fn add(self, rhs: u32) -> Self::Output {
        Self(self.0 + rhs)
    }
}
158
/// Columns of the system portion of the execution trace.
type SysTrace = [Vec<Felt>; SYS_TRACE_WIDTH];

/// Decoder portion of the execution trace together with the builder for its auxiliary columns.
pub struct DecoderTrace {
    trace: [Vec<Felt>; DECODER_TRACE_WIDTH],
    aux_builder: decoder::AuxTraceBuilder,
}

/// Stack portion of the execution trace.
pub struct StackTrace {
    trace: [Vec<Felt>; STACK_TRACE_WIDTH],
}

/// Range checker portion of the execution trace together with the builder for its auxiliary
/// columns.
pub struct RangeCheckTrace {
    trace: [Vec<Felt>; RANGE_CHECK_TRACE_WIDTH],
    aux_builder: range::AuxTraceBuilder,
}

/// Chiplets portion of the execution trace together with the builder for its auxiliary columns.
pub struct ChipletsTrace {
    trace: [Vec<Felt>; CHIPLETS_WIDTH],
    aux_builder: chiplets::AuxTraceBuilder,
}
179
180// EXECUTORS
181// ================================================================================================
182
183/// Returns an execution trace resulting from executing the provided program against the provided
184/// inputs.
185///
186/// The `host` parameter is used to provide the external environment to the program being executed,
187/// such as access to the advice provider and libraries that the program depends on.
188#[tracing::instrument("execute_program", skip_all)]
189pub fn execute(
190    program: &Program,
191    stack_inputs: StackInputs,
192    advice_inputs: AdviceInputs,
193    host: &mut impl SyncHost,
194    options: ExecutionOptions,
195) -> Result<ExecutionTrace, ExecutionError> {
196    let mut process = Process::new(program.kernel().clone(), stack_inputs, advice_inputs, options);
197    let stack_outputs = process.execute(program, host)?;
198    let trace = ExecutionTrace::new(process, stack_outputs);
199    assert_eq!(&program.hash(), trace.program_hash(), "inconsistent program hash");
200    Ok(trace)
201}
202
203/// Returns an iterator which allows callers to step through the execution and inspect VM state at
204/// each execution step.
205pub fn execute_iter(
206    program: &Program,
207    stack_inputs: StackInputs,
208    advice_inputs: AdviceInputs,
209    host: &mut impl SyncHost,
210) -> VmStateIterator {
211    let mut process = Process::new_debug(program.kernel().clone(), stack_inputs, advice_inputs);
212    let result = process.execute(program, host);
213    if result.is_ok() {
214        assert_eq!(
215            program.hash(),
216            process.decoder.program_hash().into(),
217            "inconsistent program hash"
218        );
219    }
220    VmStateIterator::new(process, result)
221}
222
223// PROCESS
224// ================================================================================================
225
/// A [Process] is the underlying execution engine for a Miden [Program].
///
/// Typically, you do not need to worry about, or use [Process] directly, instead you should prefer
/// to use either [execute] or [execute_iter], which also handle setting up the process state,
/// inputs, as well as compute the [ExecutionTrace] for the program.
///
/// However, for situations in which you want finer-grained control over those steps, you will need
/// to construct an instance of [Process] using [Process::new], invoke [Process::execute], and then
/// get the execution trace using [ExecutionTrace::new] using the outputs produced by execution.
#[cfg(not(any(test, feature = "testing")))]
pub struct Process {
    /// Provider of non-deterministic (advice) inputs.
    advice: AdviceProvider,
    /// System state (clock, execution context).
    system: System,
    /// Decoder component; also tracks debug mode and records asmop info.
    decoder: Decoder,
    /// Operand stack.
    stack: Stack,
    /// Range checker component.
    range: RangeChecker,
    /// Chiplets component (includes the kernel ROM used for syscall checks).
    chiplets: Chiplets,
    /// Maximum number of cycles, taken from the execution options.
    max_cycles: u32,
    /// Whether `Trace` decorators are forwarded to the host.
    enable_tracing: bool,
    /// Precompile transcript state (sponge capacity) used by `log_precompile`.
    pc_transcript_state: PrecompileTranscriptState,
}
248
/// Testing variant of [Process]: same components as the non-testing struct, but with all fields
/// public so tests can inspect and manipulate them directly.
#[cfg(any(test, feature = "testing"))]
pub struct Process {
    /// Provider of non-deterministic (advice) inputs.
    pub advice: AdviceProvider,
    /// System state (clock, execution context).
    pub system: System,
    /// Decoder component; also tracks debug mode and records asmop info.
    pub decoder: Decoder,
    /// Operand stack.
    pub stack: Stack,
    /// Range checker component.
    pub range: RangeChecker,
    /// Chiplets component (includes the kernel ROM used for syscall checks).
    pub chiplets: Chiplets,
    /// Maximum number of cycles, taken from the execution options.
    pub max_cycles: u32,
    /// Whether `Trace` decorators are forwarded to the host.
    pub enable_tracing: bool,
    /// Precompile transcript state (sponge capacity) used by `log_precompile`.
    pub pc_transcript_state: PrecompileTranscriptState,
    /// Tracks decorator retrieval calls for testing.
    #[cfg(test)]
    pub decorator_retrieval_count: core::cell::Cell<usize>,
}
265
266impl Process {
267    // CONSTRUCTORS
268    // --------------------------------------------------------------------------------------------
269    /// Creates a new process with the provided inputs.
270    pub fn new(
271        kernel: Kernel,
272        stack_inputs: StackInputs,
273        advice_inputs: AdviceInputs,
274        execution_options: ExecutionOptions,
275    ) -> Self {
276        Self::initialize(kernel, stack_inputs, advice_inputs, execution_options)
277    }
278
279    /// Creates a new process with provided inputs and debug options enabled.
280    pub fn new_debug(
281        kernel: Kernel,
282        stack_inputs: StackInputs,
283        advice_inputs: AdviceInputs,
284    ) -> Self {
285        Self::initialize(
286            kernel,
287            stack_inputs,
288            advice_inputs,
289            ExecutionOptions::default().with_tracing().with_debugging(true),
290        )
291    }
292
    /// Shared constructor logic: builds all processor components from the provided inputs and
    /// execution options.
    fn initialize(
        kernel: Kernel,
        stack: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        // debug mode is required whenever either debugging or tracing is requested, since both
        // rely on decorators being processed
        let in_debug_mode =
            execution_options.enable_debugging() || execution_options.enable_tracing();
        Self {
            advice: advice_inputs.into(),
            // components sized from the expected cycle count of the execution options
            system: System::new(execution_options.expected_cycles() as usize),
            decoder: Decoder::new(in_debug_mode),
            stack: Stack::new(&stack, execution_options.expected_cycles() as usize, in_debug_mode),
            range: RangeChecker::new(),
            chiplets: Chiplets::new(kernel),
            max_cycles: execution_options.max_cycles(),
            enable_tracing: execution_options.enable_tracing(),
            pc_transcript_state: PrecompileTranscriptState::default(),
            // test-only instrumentation counter
            #[cfg(test)]
            decorator_retrieval_count: core::cell::Cell::new(0),
        }
    }
315
    /// Increments the test-only counter tracking how many times decorator lists were retrieved.
    #[cfg(test)]
    #[inline(always)]
    fn record_decorator_retrieval(&self) {
        self.decorator_retrieval_count.set(self.decorator_retrieval_count.get() + 1);
    }

    /// No-op outside of tests, so call sites need no `cfg` guards.
    #[cfg(not(test))]
    #[inline(always)]
    fn record_decorator_retrieval(&self) {}
325
326    // PROGRAM EXECUTOR
327    // --------------------------------------------------------------------------------------------
328
    /// Executes the provided [`Program`] in this process.
    ///
    /// Returns the stack outputs on success. A process can execute at most one program: calling
    /// this method after the clock has advanced past zero returns
    /// [ExecutionError::ProgramAlreadyExecuted].
    pub fn execute(
        &mut self,
        program: &Program,
        host: &mut impl SyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        if self.system.clk() != 0 {
            return Err(ExecutionError::ProgramAlreadyExecuted);
        }

        // make the program's advice map available to the advice provider before execution starts
        self.advice
            .extend_map(program.mast_forest().advice_map())
            .map_err(|err| ExecutionError::advice_error(err, RowIndex::from(0), &()))?;

        // NOTE(review): `mast_forest().clone()` is presumably a cheap `Arc` clone — confirm
        self.execute_mast_node(program.entrypoint(), &program.mast_forest().clone(), host)?;

        self.stack.build_stack_outputs()
    }
347
348    // NODE EXECUTORS
349    // --------------------------------------------------------------------------------------------
350
    /// Executes the MAST node with the specified ID from the provided forest, dispatching to the
    /// node-type-specific executor.
    ///
    /// In debug mode, the node's `before_enter` decorators are executed before the node itself,
    /// and its `after_exit` decorators after it.
    fn execute_mast_node(
        &mut self,
        node_id: MastNodeId,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let node = program
            .get_node_by_id(node_id)
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id })?;

        if self.decoder.in_debug_mode() {
            self.record_decorator_retrieval();
            for &decorator_id in node.before_enter(program) {
                self.execute_decorator(&program[decorator_id], host)?;
            }
        }

        match node {
            MastNode::Block(node) => self.execute_basic_block_node(node_id, node, program, host)?,
            MastNode::Join(node) => self.execute_join_node(node, program, host)?,
            MastNode::Split(node) => self.execute_split_node(node, program, host)?,
            MastNode::Loop(node) => self.execute_loop_node(node, program, host)?,
            MastNode::Call(node) => {
                // build an error context so failures inside the call can be reported with
                // information about this call site
                let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());
                add_error_ctx_to_external_error(
                    self.execute_call_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::Dyn(node) => {
                let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());
                add_error_ctx_to_external_error(
                    self.execute_dyn_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::External(external_node) => {
                // external nodes reference a procedure root living in a forest provided by the host
                let (root_id, mast_forest) = self.resolve_external_node(external_node, host)?;

                self.execute_mast_node(root_id, &mast_forest, host)?;
            },
        }

        if self.decoder.in_debug_mode() {
            self.record_decorator_retrieval();
            for &decorator_id in node.after_exit(program) {
                self.execute_decorator(&program[decorator_id], host)?;
            }
        }

        Ok(())
    }
403
404    /// Executes the specified [JoinNode].
405    #[inline(always)]
406    fn execute_join_node(
407        &mut self,
408        node: &JoinNode,
409        program: &MastForest,
410        host: &mut impl SyncHost,
411    ) -> Result<(), ExecutionError> {
412        self.start_join_node(node, program, host)?;
413
414        // execute first and then second child of the join block
415        self.execute_mast_node(node.first(), program, host)?;
416        self.execute_mast_node(node.second(), program, host)?;
417
418        self.end_join_node(node, program, host)
419    }
420
421    /// Executes the specified [SplitNode].
422    #[inline(always)]
423    fn execute_split_node(
424        &mut self,
425        node: &SplitNode,
426        program: &MastForest,
427        host: &mut impl SyncHost,
428    ) -> Result<(), ExecutionError> {
429        // start the SPLIT block; this also pops the stack and returns the popped element
430        let condition = self.start_split_node(node, program, host)?;
431
432        // execute either the true or the false branch of the split block based on the condition
433        if condition == ONE {
434            self.execute_mast_node(node.on_true(), program, host)?;
435        } else if condition == ZERO {
436            self.execute_mast_node(node.on_false(), program, host)?;
437        } else {
438            let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());
439            return Err(ExecutionError::not_binary_value_if(condition, &err_ctx));
440        }
441
442        self.end_split_node(node, program, host)
443    }
444
    /// Executes the specified [LoopNode].
    ///
    /// The loop condition is popped from the stack when the LOOP block starts. While the value on
    /// top of the stack is ONE, the loop body is re-executed; each iteration after the first is
    /// preceded by a REPEAT operation which drops the condition. Conditions must always be binary.
    #[inline(always)]
    fn execute_loop_node(
        &mut self,
        node: &LoopNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // start the LOOP block; this also pops the stack and returns the popped element
        let condition = self.start_loop_node(node, program, host)?;

        // if the top of the stack is ONE, execute the loop body; otherwise skip the loop body
        if condition == ONE {
            // execute the loop body at least once
            self.execute_mast_node(node.body(), program, host)?;

            // keep executing the loop body until the condition on the top of the stack is no
            // longer ONE; each iteration of the loop is preceded by executing REPEAT operation
            // which drops the condition from the stack
            while self.stack.peek() == ONE {
                self.decoder.repeat();
                self.execute_op(Operation::Drop, program, host)?;
                self.execute_mast_node(node.body(), program, host)?;
            }

            // a non-binary exit condition is an error
            if self.stack.peek() != ZERO {
                let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());
                return Err(ExecutionError::not_binary_value_loop(self.stack.peek(), &err_ctx));
            }

            // end the LOOP block and drop the condition from the stack
            self.end_loop_node(node, true, program, host)
        } else if condition == ZERO {
            // end the LOOP block, but don't drop the condition from the stack because it was
            // already dropped when we started the LOOP block
            self.end_loop_node(node, false, program, host)
        } else {
            // the entry condition was neither ONE nor ZERO
            let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());
            Err(ExecutionError::not_binary_value_loop(condition, &err_ctx))
        }
    }
486
    /// Executes the specified [CallNode].
    ///
    /// For syscalls, the callee's digest is first checked against (and recorded in) the kernel
    /// ROM, so that calling a procedure which is not in the kernel fails.
    #[inline(always)]
    fn execute_call_node(
        &mut self,
        call_node: &CallNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // if this is a syscall, make sure the call target exists in the kernel
        if call_node.is_syscall() {
            let callee = program.get_node_by_id(call_node.callee()).ok_or_else(|| {
                ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() }
            })?;
            let err_ctx = err_ctx!(program, call_node, host, self.decoder.in_debug_mode());
            self.chiplets.kernel_rom.access_proc(callee.digest(), &err_ctx)?;
        }
        let err_ctx = err_ctx!(program, call_node, host, self.decoder.in_debug_mode());

        // start the CALL block, execute the callee, then end the CALL block
        self.start_call_node(call_node, program, host, &err_ctx)?;
        self.execute_mast_node(call_node.callee(), program, host)?;
        self.end_call_node(call_node, program, host, &err_ctx)
    }
509
    /// Executes the specified [miden_core::mast::DynNode].
    ///
    /// The MAST root of the callee is assumed to be at the top of the stack, and the callee is
    /// expected to be either in the current `program` or in the host.
    #[inline(always)]
    fn execute_dyn_node(
        &mut self,
        node: &DynNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());

        // obtain the callee hash; DYN and DYNCALL nodes are started differently
        let callee_hash = if node.is_dyncall() {
            self.start_dyncall_node(node, &err_ctx)?
        } else {
            self.start_dyn_node(node, program, host, &err_ctx)?
        };

        // if the callee is not in the program's MAST forest, try to find a MAST forest for it in
        // the host (corresponding to an external library loaded in the host); if none are
        // found, return an error.
        match program.find_procedure_root(callee_hash) {
            Some(callee_id) => self.execute_mast_node(callee_id, program, host)?,
            None => {
                let mast_forest = host
                    .get_mast_forest(&callee_hash)
                    .ok_or_else(|| ExecutionError::dynamic_node_not_found(callee_hash, &err_ctx))?;

                // We limit the parts of the program that can be called externally to procedure
                // roots, even though MAST doesn't have that restriction.
                // NOTE(review): this error is built with an empty context (`&()`) even though
                // `err_ctx` is in scope — confirm whether that is intentional.
                let root_id = mast_forest
                    .find_procedure_root(callee_hash)
                    .ok_or(ExecutionError::malfored_mast_forest_in_host(callee_hash, &()))?;

                // Merge the advice map of this forest into the advice provider.
                // Note that the map may be merged multiple times if a different procedure from the
                // same forest is called.
                // For now, only compiled libraries contain non-empty advice maps, so for most
                // cases, this call will be cheap.
                self.advice
                    .extend_map(mast_forest.advice_map())
                    .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

                self.execute_mast_node(root_id, &mast_forest, host)?
            },
        }

        // the two node flavors are ended differently as well
        if node.is_dyncall() {
            self.end_dyncall_node(node, program, host, &err_ctx)
        } else {
            self.end_dyn_node(node, program, host)
        }
    }
564
    /// Executes the specified [BasicBlockNode].
    ///
    /// # Arguments
    /// * `node_id` - The ID of this basic block node in the `program` MAST forest. This should
    ///   match the ID in `basic_block.decorators` when it's `Linked`.
    #[inline(always)]
    fn execute_basic_block_node(
        &mut self,
        node_id: MastNodeId,
        basic_block: &BasicBlockNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_basic_block_node(basic_block, program, host)?;

        // running offset of the current batch's first operation within the block; used for
        // per-op decorator lookups and error contexts
        let mut op_offset = 0;

        // execute the first operation batch
        // NOTE(review): assumes every basic block contains at least one op batch — confirm
        self.execute_op_batch(basic_block, &basic_block.op_batches()[0], op_offset, program, host)?;
        op_offset += basic_block.op_batches()[0].ops().len();

        // if the span contains more operation batches, execute them. each additional batch is
        // preceded by a RESPAN operation; executing RESPAN operation does not change the state
        // of the stack
        for op_batch in basic_block.op_batches().iter().skip(1) {
            self.respan(op_batch);
            self.execute_op(Operation::Noop, program, host)?;
            self.execute_op_batch(basic_block, op_batch, op_offset, program, host)?;
            op_offset += op_batch.ops().len();
        }

        self.end_basic_block_node(basic_block, program, host)?;

        // Execute any decorators which have not been executed during span ops execution; this
        // can happen for decorators appearing after all operations in a block. These decorators
        // are executed after BASIC BLOCK is closed to make sure the VM clock cycle advances beyond
        // the last clock cycle of the BASIC BLOCK ops.
        if self.decoder.in_debug_mode() {
            let num_ops = basic_block.num_operations() as usize;
            self.record_decorator_retrieval();
            for decorator in program.decorators_for_op(node_id, num_ops) {
                self.execute_decorator(decorator, host)?;
            }
        }

        Ok(())
    }
612
    /// Executes all operations in an [OpBatch]. This also ensures that all alignment rules are
    /// satisfied by executing NOOPs as needed. Specifically:
    /// - If an operation group ends with an operation carrying an immediate value, a NOOP is
    ///   executed after it.
    /// - If the number of groups in a batch is not a power of 2, NOOPs are executed (one per group)
    ///   to bring it up to the next power of two (e.g., 3 -> 4, 5 -> 8).
    ///
    /// `op_offset` is the index of the batch's first operation within the basic block; it is used
    /// for per-op decorator lookups and error contexts.
    #[inline(always)]
    fn execute_op_batch(
        &mut self,
        basic_block: &BasicBlockNode,
        batch: &OpBatch,
        op_offset: usize,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // end_indices[g] is the index one past the last operation of group g (see the group-end
        // check inside the loop below)
        let end_indices = batch.end_indices();
        let mut op_idx = 0;
        let mut group_idx = 0;
        let mut next_group_idx = 1;

        // round up the number of groups to be processed to the next power of two; we do this
        // because the processor requires the number of groups to be either 1, 2, 4, or 8; if
        // the actual number of groups is smaller, we'll pad the batch with NOOPs at the end
        let num_batch_groups = batch.num_groups().next_power_of_two();

        // Get the node ID once since it doesn't change within the loop
        let node_id = basic_block
            .linked_id()
            .expect("basic block node should be linked when executing operations");

        // execute operations in the batch one by one
        for (i, &op) in batch.ops().iter().enumerate() {
            // in debug mode, run any decorators attached to this operation first
            if self.decoder.in_debug_mode() {
                let current_op_idx = i + op_offset;
                self.record_decorator_retrieval();
                for decorator in program.decorators_for_op(node_id, current_op_idx) {
                    self.execute_decorator(decorator, host)?;
                }
            }

            // decode and execute the operation
            let err_ctx =
                err_ctx!(program, basic_block, host, self.decoder.in_debug_mode(), i + op_offset);
            self.decoder.execute_user_op(op, op_idx);
            self.execute_op_with_error_ctx(op, program, host, &err_ctx)?;

            // if the operation carries an immediate value, the value is stored at the next group
            // pointer; so, we advance the pointer to the following group
            let has_imm = op.imm_value().is_some();
            if has_imm {
                next_group_idx += 1;
            }

            // determine if we've executed all non-decorator operations in a group
            if i + 1 == end_indices[group_idx] {
                // move to the next group and reset operation index
                group_idx = next_group_idx;
                next_group_idx += 1;
                op_idx = 0;

                // if we haven't reached the end of the batch yet, set up the decoder for
                // decoding the next operation group
                if group_idx < num_batch_groups {
                    self.decoder.start_op_group(batch.groups()[group_idx]);
                }
            } else {
                // if we are not at the end of the group, just increment the operation index
                op_idx += 1;
            }
        }

        Ok(())
    }
686
    /// Executes the specified decorator.
    ///
    /// - `Debug` decorators are forwarded to the host, but only in debug mode.
    /// - `AsmOp` decorators are recorded by the decoder, but only in debug mode.
    /// - `Trace` decorators are forwarded to the host, but only when tracing is enabled.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.decoder.in_debug_mode() {
                    // capture the clock before handing state to the host, for error reporting
                    let process = &mut self.state();
                    let clk = process.clk();
                    host.on_debug(process, options)
                        .map_err(|err| ExecutionError::DebugHandlerError { clk, err })?;
                }
            },
            Decorator::AsmOp(assembly_op) => {
                if self.decoder.in_debug_mode() {
                    self.decoder.append_asmop(self.system.clk(), assembly_op.clone());
                }
            },
            Decorator::Trace(id) => {
                if self.enable_tracing {
                    let process = &mut self.state();
                    let clk = process.clk();
                    host.on_trace(process, *id).map_err(|err| {
                        ExecutionError::TraceHandlerError { clk, trace_id: *id, err }
                    })?;
                }
            },
        };
        Ok(())
    }
719
    /// Resolves an external node reference to a procedure root using the [`MastForest`] store in
    /// the provided host.
    ///
    /// The [`MastForest`] for the procedure is cached to avoid additional queries to the host.
    /// NOTE(review): no caching is visible in this function itself — confirm whether the caching
    /// happens in the host / store, or whether this doc line is stale.
    ///
    /// # Errors
    /// Returns an error if the host has no forest containing the procedure, if the digest does
    /// not correspond to a procedure root in that forest, if resolution would loop through
    /// another external node, or if merging the forest's advice map fails.
    fn resolve_external_node(
        &mut self,
        external_node: &ExternalNode,
        host: &impl SyncHost,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError> {
        let node_digest = external_node.digest();

        let mast_forest = host
            .get_mast_forest(&node_digest)
            .ok_or(ExecutionError::no_mast_forest_with_procedure(node_digest, &()))?;

        // We limit the parts of the program that can be called externally to procedure
        // roots, even though MAST doesn't have that restriction.
        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, &()))?;

        // if the node that we got by looking up an external reference is also an External
        // node, we are about to enter into an infinite loop - so, return an error
        if mast_forest[root_id].is_external() {
            return Err(ExecutionError::CircularExternalNode(node_digest));
        }

        // Merge the advice map of this forest into the advice provider.
        // Note that the map may be merged multiple times if a different procedure from the same
        // forest is called.
        // For now, only compiled libraries contain non-empty advice maps, so for most cases,
        // this call will be cheap.
        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

        Ok((root_id, mast_forest))
    }
758
759    // PUBLIC ACCESSORS
760    // ================================================================================================
761
    /// Returns the kernel against which syscall targets are checked.
    pub const fn kernel(&self) -> &Kernel {
        self.chiplets.kernel_rom.kernel()
    }
765
766    pub fn into_parts(
767        self,
768    ) -> (System, Decoder, Stack, RangeChecker, Chiplets, PrecompileTranscriptState) {
769        (
770            self.system,
771            self.decoder,
772            self.stack,
773            self.range,
774            self.chiplets,
775            self.pc_transcript_state,
776        )
777    }
778}
779
/// Borrowed view over the components of a [Process] that are exposed through [ProcessState].
#[derive(Debug)]
pub struct SlowProcessState<'a> {
    advice: &'a mut AdviceProvider,
    system: &'a System,
    stack: &'a Stack,
    chiplets: &'a Chiplets,
}

// PROCESS STATE
// ================================================================================================

/// A view over the state of a running processor (either the slow [Process] or the fast
/// processor), handed to host callbacks.
#[derive(Debug)]
pub enum ProcessState<'a> {
    Slow(SlowProcessState<'a>),
    Fast(FastProcessState<'a>),
    /// A process state that does nothing. Calling any of its methods results in a panic. It is
    /// expected to be used in conjunction with the `NoopHost`.
    Noop(()),
}
799
impl Process {
    /// Returns a [ProcessState] view over this (slow) process.
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Slow(SlowProcessState {
            advice: &mut self.advice,
            system: &self.system,
            stack: &self.stack,
            chiplets: &self.chiplets,
        })
    }
}
811
812impl<'a> ProcessState<'a> {
813    /// Returns a reference to the advice provider.
814    #[inline(always)]
815    pub fn advice_provider(&self) -> &AdviceProvider {
816        match self {
817            ProcessState::Slow(state) => state.advice,
818            ProcessState::Fast(state) => &state.processor.advice,
819            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
820        }
821    }
822
823    /// Returns a mutable reference to the advice provider.
824    #[inline(always)]
825    pub fn advice_provider_mut(&mut self) -> &mut AdviceProvider {
826        match self {
827            ProcessState::Slow(state) => state.advice,
828            ProcessState::Fast(state) => &mut state.processor.advice,
829            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
830        }
831    }
832
833    /// Returns the current clock cycle of a process.
834    #[inline(always)]
835    pub fn clk(&self) -> RowIndex {
836        match self {
837            ProcessState::Slow(state) => state.system.clk(),
838            ProcessState::Fast(state) => state.processor.clk,
839            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
840        }
841    }
842
843    /// Returns the current execution context ID.
844    #[inline(always)]
845    pub fn ctx(&self) -> ContextId {
846        match self {
847            ProcessState::Slow(state) => state.system.ctx(),
848            ProcessState::Fast(state) => state.processor.ctx,
849            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
850        }
851    }
852
853    /// Returns the value located at the specified position on the stack at the current clock cycle.
854    ///
855    /// This method can access elements beyond the top 16 positions by using the overflow table.
856    #[inline(always)]
857    pub fn get_stack_item(&self, pos: usize) -> Felt {
858        match self {
859            ProcessState::Slow(state) => state.stack.get(pos),
860            ProcessState::Fast(state) => state.processor.stack_get(pos),
861            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
862        }
863    }
864
865    /// Returns a word starting at the specified element index on the stack in big-endian
866    /// (reversed) order.
867    ///
868    /// The word is formed by taking 4 consecutive elements starting from the specified index.
869    /// For example, start_idx=0 creates a word from stack elements 0-3, start_idx=1 creates
870    /// a word from elements 1-4, etc.
871    ///
872    /// In big-endian order, stack element N+3 will be at position 0 of the word, N+2 at
873    /// position 1, N+1 at position 2, and N at position 3. This matches the behavior of
874    /// `mem_loadw_be` where `mem[a+3]` ends up on top of the stack.
875    ///
876    /// This method can access elements beyond the top 16 positions by using the overflow table.
877    /// Creating a word does not change the state of the stack.
878    #[inline(always)]
879    pub fn get_stack_word_be(&self, start_idx: usize) -> Word {
880        match self {
881            ProcessState::Slow(state) => state.stack.get_word(start_idx),
882            ProcessState::Fast(state) => state.processor.stack_get_word(start_idx),
883            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
884        }
885    }
886
887    /// Returns a word starting at the specified element index on the stack in little-endian
888    /// (memory) order.
889    ///
890    /// The word is formed by taking 4 consecutive elements starting from the specified index.
891    /// For example, start_idx=0 creates a word from stack elements 0-3, start_idx=1 creates
892    /// a word from elements 1-4, etc.
893    ///
894    /// In little-endian order, stack element N will be at position 0 of the word, N+1 at
895    /// position 1, N+2 at position 2, and N+3 at position 3. This matches the behavior of
896    /// `mem_loadw_le` where `mem[a]` ends up on top of the stack.
897    ///
898    /// This method can access elements beyond the top 16 positions by using the overflow table.
899    /// Creating a word does not change the state of the stack.
900    #[inline(always)]
901    pub fn get_stack_word_le(&self, start_idx: usize) -> Word {
902        let mut word = self.get_stack_word_be(start_idx);
903        word.reverse();
904        word
905    }
906
907    /// Returns a word starting at the specified element index on the stack.
908    ///
909    /// This is an alias for [`Self::get_stack_word_be`] for backward compatibility. For new code,
910    /// prefer using the explicit `get_stack_word_be()` or `get_stack_word_le()` to make the
911    /// ordering expectations clear.
912    ///
913    /// See [`Self::get_stack_word_be`] for detailed documentation.
914    #[deprecated(
915        since = "0.19.0",
916        note = "Use `get_stack_word_be()` or `get_stack_word_le()` to make endianness explicit"
917    )]
918    #[inline(always)]
919    pub fn get_stack_word(&self, start_idx: usize) -> Word {
920        self.get_stack_word_be(start_idx)
921    }
922
923    /// Returns stack state at the current clock cycle. This includes the top 16 items of the
924    /// stack + overflow entries.
925    #[inline(always)]
926    pub fn get_stack_state(&self) -> Vec<Felt> {
927        match self {
928            ProcessState::Slow(state) => state.stack.get_state_at(state.system.clk()),
929            ProcessState::Fast(state) => state.processor.stack().iter().rev().copied().collect(),
930            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
931        }
932    }
933
934    /// Returns the element located at the specified context/address, or None if the address hasn't
935    /// been accessed previously.
936    #[inline(always)]
937    pub fn get_mem_value(&self, ctx: ContextId, addr: u32) -> Option<Felt> {
938        match self {
939            ProcessState::Slow(state) => state.chiplets.memory.get_value(ctx, addr),
940            ProcessState::Fast(state) => state.processor.memory.read_element_impl(ctx, addr),
941            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
942        }
943    }
944
945    /// Returns the batch of elements starting at the specified context/address.
946    ///
947    /// # Errors
948    /// - If the address is not word aligned.
949    #[inline(always)]
950    pub fn get_mem_word(&self, ctx: ContextId, addr: u32) -> Result<Option<Word>, MemoryError> {
951        match self {
952            ProcessState::Slow(state) => state.chiplets.memory.get_word(ctx, addr),
953            ProcessState::Fast(state) => {
954                state.processor.memory.read_word_impl(ctx, addr, None, &())
955            },
956            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
957        }
958    }
959
960    /// Reads (start_addr, end_addr) tuple from the specified elements of the operand stack (
961    /// without modifying the state of the stack), and verifies that memory range is valid.
962    pub fn get_mem_addr_range(
963        &self,
964        start_idx: usize,
965        end_idx: usize,
966    ) -> Result<core::ops::Range<u32>, MemoryError> {
967        let start_addr = self.get_stack_item(start_idx).as_int();
968        let end_addr = self.get_stack_item(end_idx).as_int();
969
970        if start_addr > u32::MAX as u64 {
971            return Err(MemoryError::address_out_of_bounds(start_addr, &()));
972        }
973        if end_addr > u32::MAX as u64 {
974            return Err(MemoryError::address_out_of_bounds(end_addr, &()));
975        }
976
977        if start_addr > end_addr {
978            return Err(MemoryError::InvalidMemoryRange { start_addr, end_addr });
979        }
980
981        Ok(start_addr as u32..end_addr as u32)
982    }
983
984    /// Returns the entire memory state for the specified execution context at the current clock
985    /// cycle.
986    ///
987    /// The state is returned as a vector of (address, value) tuples, and includes addresses which
988    /// have been accessed at least once.
989    #[inline(always)]
990    pub fn get_mem_state(&self, ctx: ContextId) -> Vec<(MemoryAddress, Felt)> {
991        match self {
992            ProcessState::Slow(state) => {
993                state.chiplets.memory.get_state_at(ctx, state.system.clk())
994            },
995            ProcessState::Fast(state) => state.processor.memory.get_memory_state(ctx),
996            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
997        }
998    }
999}
1000
1001impl<'a> From<&'a mut Process> for ProcessState<'a> {
1002    fn from(process: &'a mut Process) -> Self {
1003        process.state()
1004    }
1005}
1006
1007// HELPERS
1008// ================================================================================================
1009
1010/// For errors generated from processing an `ExternalNode`, returns the same error except with
1011/// proper error context.
1012pub(crate) fn add_error_ctx_to_external_error(
1013    result: Result<(), ExecutionError>,
1014    err_ctx: impl ErrorContext,
1015) -> Result<(), ExecutionError> {
1016    match result {
1017        Ok(_) => Ok(()),
1018        // Add context information to any errors coming from executing an `ExternalNode`
1019        Err(err) => match err {
1020            ExecutionError::NoMastForestWithProcedure { label, source_file: _, root_digest }
1021            | ExecutionError::MalformedMastForestInHost { label, source_file: _, root_digest } => {
1022                if label == SourceSpan::UNKNOWN {
1023                    let err_with_ctx =
1024                        ExecutionError::no_mast_forest_with_procedure(root_digest, &err_ctx);
1025                    Err(err_with_ctx)
1026                } else {
1027                    // If the source span was already populated, just return the error as-is. This
1028                    // would occur when a call deeper down the call stack was responsible for the
1029                    // error.
1030                    Err(err)
1031                }
1032            },
1033
1034            _ => {
1035                // do nothing
1036                Err(err)
1037            },
1038        },
1039    }
1040}