miden_processor/
lib.rs

1#![no_std]
2
3#[macro_use]
4extern crate alloc;
5
6#[cfg(feature = "std")]
7extern crate std;
8
9use alloc::{sync::Arc, vec::Vec};
10use core::fmt::{Display, LowerHex};
11
12use miden_air::trace::{
13    CHIPLETS_WIDTH, DECODER_TRACE_WIDTH, MIN_TRACE_LEN, RANGE_CHECK_TRACE_WIDTH, STACK_TRACE_WIDTH,
14    SYS_TRACE_WIDTH,
15};
16pub use miden_air::{ExecutionOptions, ExecutionOptionsError, RowIndex};
17pub use miden_core::{
18    AssemblyOp, EMPTY_WORD, Felt, Kernel, ONE, Operation, Program, ProgramInfo, QuadExtension,
19    StackInputs, StackOutputs, WORD_SIZE, Word, ZERO,
20    crypto::merkle::SMT_DEPTH,
21    errors::InputError,
22    mast::{MastForest, MastNode, MastNodeExt, MastNodeId},
23    precompile::{PrecompileRequest, PrecompileTranscriptState},
24    sys_events::SystemEvent,
25    utils::DeserializationError,
26};
27use miden_core::{
28    Decorator, FieldElement,
29    mast::{
30        BasicBlockNode, CallNode, DecoratorOpLinkIterator, DynNode, ExternalNode, JoinNode,
31        LoopNode, OpBatch, SplitNode,
32    },
33};
34use miden_debug_types::SourceSpan;
35pub use winter_prover::matrix::ColMatrix;
36
37pub(crate) mod continuation_stack;
38
39pub mod fast;
40use fast::FastProcessState;
41pub mod parallel;
42pub(crate) mod processor;
43
44mod operations;
45
46mod system;
47pub use system::ContextId;
48use system::System;
49
50#[cfg(test)]
51mod test_utils;
52
53pub(crate) mod decoder;
54use decoder::Decoder;
55
56mod stack;
57use stack::Stack;
58
59mod range;
60use range::RangeChecker;
61
62mod host;
63
64pub use host::{
65    AdviceMutation, AsyncHost, BaseHost, FutureMaybeSend, MastForestStore, MemMastForestStore,
66    SyncHost,
67    advice::{AdviceError, AdviceInputs, AdviceProvider},
68    debug::DefaultDebugHandler,
69    default::{DefaultHost, HostLibrary},
70    handlers::{DebugHandler, EventError, EventHandler, EventHandlerRegistry, NoopEventHandler},
71};
72
73mod chiplets;
74use chiplets::Chiplets;
75pub use chiplets::MemoryError;
76
77mod trace;
78use trace::TraceFragment;
79pub use trace::{ChipletsLengths, ExecutionTrace, NUM_RAND_ROWS, TraceLenSummary};
80
81mod errors;
82pub use errors::{ErrorContext, ErrorContextImpl, ExecutionError};
83
84pub mod utils;
85
86#[cfg(all(test, not(feature = "no_err_ctx")))]
87mod tests;
88
89mod debug;
90pub use debug::{AsmOpInfo, VmState, VmStateIterator};
91
92// RE-EXPORTS
93// ================================================================================================
94
/// Mathematical primitives (field elements and FFT utilities) re-exported for the convenience of
/// downstream crates.
pub mod math {
    pub use miden_core::{Felt, FieldElement, StarkField};
    pub use winter_prover::math::fft;
}
99
/// Cryptographic primitives (hash functions, Merkle data structures, and random coins)
/// re-exported for the convenience of downstream crates.
pub mod crypto {
    pub use miden_core::crypto::{
        hash::{Blake3_192, Blake3_256, ElementHasher, Hasher, Poseidon2, Rpo256, Rpx256},
        merkle::{
            MerkleError, MerklePath, MerkleStore, MerkleTree, NodeIndex, PartialMerkleTree,
            SimpleSmt,
        },
        random::{RandomCoin, RpoRandomCoin, RpxRandomCoin, WinterRandomCoin},
    };
}
110
// TYPE ALIASES & CORE TYPES
// ================================================================================================
113
/// A VM memory address.
///
/// This is a thin newtype around `u32` which keeps memory addresses distinct from plain integers
/// in signatures, while providing `Display`/`LowerHex` formatting and address arithmetic.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct MemoryAddress(u32);

impl From<u32> for MemoryAddress {
    fn from(addr: u32) -> Self {
        Self(addr)
    }
}

impl From<MemoryAddress> for u32 {
    fn from(value: MemoryAddress) -> Self {
        value.0
    }
}

impl Display for MemoryAddress {
    /// Formats the address as its underlying decimal `u32` value.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        <u32 as Display>::fmt(&self.0, f)
    }
}

impl LowerHex for MemoryAddress {
    /// Formats the address as its underlying `u32` value in lowercase hexadecimal.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        <u32 as LowerHex>::fmt(&self.0, f)
    }
}

impl core::ops::Add for MemoryAddress {
    type Output = Self;

    /// Adds two addresses component-wise; overflow follows `u32 + u32` semantics.
    fn add(self, rhs: Self) -> Self {
        Self(self.0 + rhs.0)
    }
}

impl core::ops::Add<u32> for MemoryAddress {
    type Output = Self;

    /// Offsets the address by a raw `u32`; overflow follows `u32 + u32` semantics.
    fn add(self, rhs: u32) -> Self {
        Self(self.0 + rhs)
    }
}
156
157type SysTrace = [Vec<Felt>; SYS_TRACE_WIDTH];
158
/// Main trace columns of the decoder, together with a builder for the decoder's auxiliary trace
/// columns.
pub struct DecoderTrace {
    trace: [Vec<Felt>; DECODER_TRACE_WIDTH],
    aux_builder: decoder::AuxTraceBuilder,
}
163
/// Main trace columns of the operand stack.
pub struct StackTrace {
    trace: [Vec<Felt>; STACK_TRACE_WIDTH],
}
167
/// Main trace columns of the range checker, together with a builder for the range checker's
/// auxiliary trace columns.
pub struct RangeCheckTrace {
    trace: [Vec<Felt>; RANGE_CHECK_TRACE_WIDTH],
    aux_builder: range::AuxTraceBuilder,
}
172
/// Main trace columns of the chiplets, together with a builder for the chiplets' auxiliary trace
/// columns.
pub struct ChipletsTrace {
    trace: [Vec<Felt>; CHIPLETS_WIDTH],
    aux_builder: chiplets::AuxTraceBuilder,
}
177
178// EXECUTORS
179// ================================================================================================
180
181/// Returns an execution trace resulting from executing the provided program against the provided
182/// inputs.
183///
184/// The `host` parameter is used to provide the external environment to the program being executed,
185/// such as access to the advice provider and libraries that the program depends on.
186#[tracing::instrument("execute_program", skip_all)]
187pub fn execute(
188    program: &Program,
189    stack_inputs: StackInputs,
190    advice_inputs: AdviceInputs,
191    host: &mut impl SyncHost,
192    options: ExecutionOptions,
193) -> Result<ExecutionTrace, ExecutionError> {
194    let mut process = Process::new(program.kernel().clone(), stack_inputs, advice_inputs, options);
195    let stack_outputs = process.execute(program, host)?;
196    let trace = ExecutionTrace::new(process, stack_outputs);
197    assert_eq!(&program.hash(), trace.program_hash(), "inconsistent program hash");
198    Ok(trace)
199}
200
201/// Returns an iterator which allows callers to step through the execution and inspect VM state at
202/// each execution step.
203pub fn execute_iter(
204    program: &Program,
205    stack_inputs: StackInputs,
206    advice_inputs: AdviceInputs,
207    host: &mut impl SyncHost,
208) -> VmStateIterator {
209    let mut process = Process::new_debug(program.kernel().clone(), stack_inputs, advice_inputs);
210    let result = process.execute(program, host);
211    if result.is_ok() {
212        assert_eq!(
213            program.hash(),
214            process.decoder.program_hash().into(),
215            "inconsistent program hash"
216        );
217    }
218    VmStateIterator::new(process, result)
219}
220
221// PROCESS
222// ================================================================================================
223
224/// A [Process] is the underlying execution engine for a Miden [Program].
225///
226/// Typically, you do not need to worry about, or use [Process] directly, instead you should prefer
227/// to use either [execute] or [execute_iter], which also handle setting up the process state,
228/// inputs, as well as compute the [ExecutionTrace] for the program.
229///
230/// However, for situations in which you want finer-grained control over those steps, you will need
231/// to construct an instance of [Process] using [Process::new], invoke [Process::execute], and then
232/// get the execution trace using [ExecutionTrace::new] using the outputs produced by execution.
#[cfg(not(any(test, feature = "testing")))]
pub struct Process {
    /// Advice provider from which the VM reads non-deterministic (advice) inputs.
    advice: AdviceProvider,
    /// System state; tracks, among other things, the current clock cycle and execution context.
    system: System,
    /// Decoder state; accumulates the hash of the executed program.
    decoder: Decoder,
    /// Operand stack.
    stack: Stack,
    /// Range checker state.
    range: RangeChecker,
    /// Chiplets state (includes, among others, the kernel ROM).
    chiplets: Chiplets,
    /// Maximum number of cycles the program is allowed to execute (from [ExecutionOptions]).
    max_cycles: u32,
    /// When true, [Decorator::Trace] events are forwarded to the host.
    enable_tracing: bool,
    /// Precompile transcript state (sponge capacity) used by `log_precompile`.
    pc_transcript_state: PrecompileTranscriptState,
}
246
// Same as the non-testing `Process`, but with all fields public so tests can inspect and mutate
// internal state directly.
#[cfg(any(test, feature = "testing"))]
pub struct Process {
    /// Advice provider from which the VM reads non-deterministic (advice) inputs.
    pub advice: AdviceProvider,
    /// System state; tracks, among other things, the current clock cycle and execution context.
    pub system: System,
    /// Decoder state; accumulates the hash of the executed program.
    pub decoder: Decoder,
    /// Operand stack.
    pub stack: Stack,
    /// Range checker state.
    pub range: RangeChecker,
    /// Chiplets state (includes, among others, the kernel ROM).
    pub chiplets: Chiplets,
    /// Maximum number of cycles the program is allowed to execute (from [ExecutionOptions]).
    pub max_cycles: u32,
    /// When true, [Decorator::Trace] events are forwarded to the host.
    pub enable_tracing: bool,
    /// Precompile transcript state (sponge capacity) used by `log_precompile`.
    pub pc_transcript_state: PrecompileTranscriptState,
}
260
impl Process {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------
    /// Creates a new process with the provided inputs.
    pub fn new(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        Self::initialize(kernel, stack_inputs, advice_inputs, execution_options)
    }

    /// Creates a new process with provided inputs and debug options enabled.
    pub fn new_debug(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
    ) -> Self {
        Self::initialize(
            kernel,
            stack_inputs,
            advice_inputs,
            ExecutionOptions::default().with_tracing().with_debugging(true),
        )
    }

    /// Instantiates all components of the process from the provided inputs and execution options.
    fn initialize(
        kernel: Kernel,
        stack: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        let in_debug_mode = execution_options.enable_debugging();
        Self {
            advice: advice_inputs.into(),
            system: System::new(execution_options.expected_cycles() as usize),
            decoder: Decoder::new(in_debug_mode),
            stack: Stack::new(&stack, execution_options.expected_cycles() as usize, in_debug_mode),
            range: RangeChecker::new(),
            chiplets: Chiplets::new(kernel),
            max_cycles: execution_options.max_cycles(),
            enable_tracing: execution_options.enable_tracing(),
            pc_transcript_state: PrecompileTranscriptState::default(),
        }
    }

    // PROGRAM EXECUTOR
    // --------------------------------------------------------------------------------------------

    /// Executes the provided [`Program`] in this process.
    ///
    /// A process can be used to execute at most one program; attempting to reuse it returns
    /// [ExecutionError::ProgramAlreadyExecuted].
    pub fn execute(
        &mut self,
        program: &Program,
        host: &mut impl SyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // a non-zero clock indicates that this process has already executed a program
        if self.system.clk() != 0 {
            return Err(ExecutionError::ProgramAlreadyExecuted);
        }

        // make the program's advice map available to the advice provider
        self.advice
            .extend_map(program.mast_forest().advice_map())
            .map_err(|err| ExecutionError::advice_error(err, RowIndex::from(0), &()))?;

        self.execute_mast_node(program.entrypoint(), &program.mast_forest().clone(), host)?;

        self.stack.build_stack_outputs()
    }

    // NODE EXECUTORS
    // --------------------------------------------------------------------------------------------

    /// Executes the MAST node with the specified ID from the provided forest, including any
    /// decorators attached before entering and after exiting the node.
    fn execute_mast_node(
        &mut self,
        node_id: MastNodeId,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let node = program
            .get_node_by_id(node_id)
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id })?;

        // execute decorators attached before the node
        for &decorator_id in node.before_enter() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        match node {
            MastNode::Block(node) => self.execute_basic_block_node(node, program, host)?,
            MastNode::Join(node) => self.execute_join_node(node, program, host)?,
            MastNode::Split(node) => self.execute_split_node(node, program, host)?,
            MastNode::Loop(node) => self.execute_loop_node(node, program, host)?,
            MastNode::Call(node) => {
                let err_ctx = err_ctx!(program, node, host);
                add_error_ctx_to_external_error(
                    self.execute_call_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::Dyn(node) => {
                let err_ctx = err_ctx!(program, node, host);
                add_error_ctx_to_external_error(
                    self.execute_dyn_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::External(external_node) => {
                let (root_id, mast_forest) = self.resolve_external_node(external_node, host)?;

                self.execute_mast_node(root_id, &mast_forest, host)?;
            },
        }

        // execute decorators attached after the node
        for &decorator_id in node.after_exit() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the specified [JoinNode].
    #[inline(always)]
    fn execute_join_node(
        &mut self,
        node: &JoinNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_join_node(node, program, host)?;

        // execute first and then second child of the join block
        self.execute_mast_node(node.first(), program, host)?;
        self.execute_mast_node(node.second(), program, host)?;

        self.end_join_node(node, program, host)
    }

    /// Executes the specified [SplitNode].
    #[inline(always)]
    fn execute_split_node(
        &mut self,
        node: &SplitNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // start the SPLIT block; this also pops the stack and returns the popped element
        let condition = self.start_split_node(node, program, host)?;

        // execute either the true or the false branch of the split block based on the condition
        if condition == ONE {
            self.execute_mast_node(node.on_true(), program, host)?;
        } else if condition == ZERO {
            self.execute_mast_node(node.on_false(), program, host)?;
        } else {
            // the condition must be binary; anything else is an error
            let err_ctx = err_ctx!(program, node, host);
            return Err(ExecutionError::not_binary_value_if(condition, &err_ctx));
        }

        self.end_split_node(node, program, host)
    }

    /// Executes the specified [LoopNode].
    #[inline(always)]
    fn execute_loop_node(
        &mut self,
        node: &LoopNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // start the LOOP block; this also pops the stack and returns the popped element
        let condition = self.start_loop_node(node, program, host)?;

        // if the top of the stack is ONE, execute the loop body; otherwise skip the loop body
        if condition == ONE {
            // execute the loop body at least once
            self.execute_mast_node(node.body(), program, host)?;

            // keep executing the loop body until the condition on the top of the stack is no
            // longer ONE; each iteration of the loop is preceded by executing REPEAT operation
            // which drops the condition from the stack
            while self.stack.peek() == ONE {
                self.decoder.repeat();
                self.execute_op(Operation::Drop, program, host)?;
                self.execute_mast_node(node.body(), program, host)?;
            }

            // after the loop exits, the condition on top of the stack must be ZERO (binary)
            if self.stack.peek() != ZERO {
                let err_ctx = err_ctx!(program, node, host);
                return Err(ExecutionError::not_binary_value_loop(self.stack.peek(), &err_ctx));
            }

            // end the LOOP block and drop the condition from the stack
            self.end_loop_node(node, true, program, host)
        } else if condition == ZERO {
            // end the LOOP block, but don't drop the condition from the stack because it was
            // already dropped when we started the LOOP block
            self.end_loop_node(node, false, program, host)
        } else {
            // the entry condition must also be binary; anything else is an error
            let err_ctx = err_ctx!(program, node, host);
            Err(ExecutionError::not_binary_value_loop(condition, &err_ctx))
        }
    }

    /// Executes the specified [CallNode].
    #[inline(always)]
    fn execute_call_node(
        &mut self,
        call_node: &CallNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // if this is a syscall, make sure the call target exists in the kernel
        if call_node.is_syscall() {
            let callee = program.get_node_by_id(call_node.callee()).ok_or_else(|| {
                ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() }
            })?;
            let err_ctx = err_ctx!(program, call_node, host);
            self.chiplets.kernel_rom.access_proc(callee.digest(), &err_ctx)?;
        }
        let err_ctx = err_ctx!(program, call_node, host);

        self.start_call_node(call_node, program, host, &err_ctx)?;
        self.execute_mast_node(call_node.callee(), program, host)?;
        self.end_call_node(call_node, program, host, &err_ctx)
    }

    /// Executes the specified [miden_core::mast::DynNode].
    ///
    /// The MAST root of the callee is assumed to be at the top of the stack, and the callee is
    /// expected to be either in the current `program` or in the host.
    #[inline(always)]
    fn execute_dyn_node(
        &mut self,
        node: &DynNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let err_ctx = err_ctx!(program, node, host);

        // starting a DYNCALL vs. a DYN node affects the process state differently, but both
        // return the hash of the callee popped from the stack
        let callee_hash = if node.is_dyncall() {
            self.start_dyncall_node(node, &err_ctx)?
        } else {
            self.start_dyn_node(node, program, host, &err_ctx)?
        };

        // if the callee is not in the program's MAST forest, try to find a MAST forest for it in
        // the host (corresponding to an external library loaded in the host); if none are
        // found, return an error.
        match program.find_procedure_root(callee_hash) {
            Some(callee_id) => self.execute_mast_node(callee_id, program, host)?,
            None => {
                let mast_forest = host
                    .get_mast_forest(&callee_hash)
                    .ok_or_else(|| ExecutionError::dynamic_node_not_found(callee_hash, &err_ctx))?;

                // We limit the parts of the program that can be called externally to procedure
                // roots, even though MAST doesn't have that restriction.
                let root_id = mast_forest
                    .find_procedure_root(callee_hash)
                    .ok_or(ExecutionError::malfored_mast_forest_in_host(callee_hash, &()))?;

                // Merge the advice map of this forest into the advice provider.
                // Note that the map may be merged multiple times if a different procedure from the
                // same forest is called.
                // For now, only compiled libraries contain non-empty advice maps, so for most
                // cases, this call will be cheap.
                self.advice
                    .extend_map(mast_forest.advice_map())
                    .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

                self.execute_mast_node(root_id, &mast_forest, host)?
            },
        }

        if node.is_dyncall() {
            self.end_dyncall_node(node, program, host, &err_ctx)
        } else {
            self.end_dyn_node(node, program, host)
        }
    }

    /// Executes the specified [BasicBlockNode].
    #[inline(always)]
    fn execute_basic_block_node(
        &mut self,
        basic_block: &BasicBlockNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_basic_block_node(basic_block, program, host)?;

        // `op_offset` tracks the index of the first op of the current batch within the block, so
        // that decorators (which are indexed block-wide) can be matched to ops within a batch
        let mut op_offset = 0;
        let mut decorator_ids = basic_block.indexed_decorator_iter();

        // execute the first operation batch
        self.execute_op_batch(
            basic_block,
            &basic_block.op_batches()[0],
            &mut decorator_ids,
            op_offset,
            program,
            host,
        )?;
        op_offset += basic_block.op_batches()[0].ops().len();

        // if the span contains more operation batches, execute them. each additional batch is
        // preceded by a RESPAN operation; executing RESPAN operation does not change the state
        // of the stack
        for op_batch in basic_block.op_batches().iter().skip(1) {
            self.respan(op_batch);
            self.execute_op(Operation::Noop, program, host)?;
            self.execute_op_batch(
                basic_block,
                op_batch,
                &mut decorator_ids,
                op_offset,
                program,
                host,
            )?;
            op_offset += op_batch.ops().len();
        }

        self.end_basic_block_node(basic_block, program, host)?;

        // execute any decorators which have not been executed during span ops execution; this
        // can happen for decorators appearing after all operations in a block. these decorators
        // are executed after BASIC BLOCK is closed to make sure the VM clock cycle advances beyond
        // the last clock cycle of the BASIC BLOCK ops.
        for (_, decorator_id) in decorator_ids {
            let decorator = program
                .get_decorator_by_id(decorator_id)
                .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
            self.execute_decorator(decorator, host)?;
        }

        Ok(())
    }

    /// Executes all operations in an [OpBatch]. This also ensures that all alignment rules are
    /// satisfied by executing NOOPs as needed. Specifically:
    /// - If an operation group ends with an operation carrying an immediate value, a NOOP is
    ///   executed after it.
    /// - If the number of groups in a batch is not a power of 2, NOOPs are executed (one per group)
    ///   to bring it up to the next power of two (e.g., 3 -> 4, 5 -> 8).
    #[inline(always)]
    fn execute_op_batch(
        &mut self,
        basic_block: &BasicBlockNode,
        batch: &OpBatch,
        decorators: &mut DecoratorOpLinkIterator,
        op_offset: usize,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let end_indices = batch.end_indices();
        let mut op_idx = 0;
        let mut group_idx = 0;
        let mut next_group_idx = 1;

        // round up the number of groups to be processed to the next power of two; we do this
        // because the processor requires the number of groups to be either 1, 2, 4, or 8; if
        // the actual number of groups is smaller, we'll pad the batch with NOOPs at the end
        let num_batch_groups = batch.num_groups().next_power_of_two();

        // execute operations in the batch one by one
        for (i, &op) in batch.ops().iter().enumerate() {
            // first, execute any decorators associated with the current operation
            while let Some((_, decorator_id)) = decorators.next_filtered(i + op_offset) {
                let decorator = program
                    .get_decorator_by_id(decorator_id)
                    .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
                self.execute_decorator(decorator, host)?;
            }

            // decode and execute the operation
            let err_ctx = err_ctx!(program, basic_block, host, i + op_offset);
            self.decoder.execute_user_op(op, op_idx);
            self.execute_op_with_error_ctx(op, program, host, &err_ctx)?;

            // if the operation carries an immediate value, the value is stored at the next group
            // pointer; so, we advance the pointer to the following group
            let has_imm = op.imm_value().is_some();
            if has_imm {
                next_group_idx += 1;
            }

            // determine if we've executed all non-decorator operations in a group
            if i + 1 == end_indices[group_idx] {
                // move to the next group and reset operation index
                group_idx = next_group_idx;
                next_group_idx += 1;
                op_idx = 0;

                // if we haven't reached the end of the batch yet, set up the decoder for
                // decoding the next operation group
                if group_idx < num_batch_groups {
                    self.decoder.start_op_group(batch.groups()[group_idx]);
                }
            } else {
                // if we are not at the end of the group, just increment the operation index
                op_idx += 1;
            }
        }

        Ok(())
    }

    /// Executes the specified decorator.
    ///
    /// Debug and AsmOp decorators take effect only when the decoder is in debug mode; Trace
    /// decorators take effect only when tracing is enabled.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.decoder.in_debug_mode() {
                    let process = &mut self.state();
                    host.on_debug(process, options)?;
                }
            },
            Decorator::AsmOp(assembly_op) => {
                if self.decoder.in_debug_mode() {
                    self.decoder.append_asmop(self.system.clk(), assembly_op.clone());
                }
            },
            Decorator::Trace(id) => {
                if self.enable_tracing {
                    let process = &mut self.state();
                    host.on_trace(process, *id)?;
                }
            },
        };
        Ok(())
    }

    /// Resolves an external node reference to a procedure root using the [`MastForest`] store in
    /// the provided host.
    ///
    /// The [`MastForest`] for the procedure is cached to avoid additional queries to the host.
    fn resolve_external_node(
        &mut self,
        external_node: &ExternalNode,
        host: &impl SyncHost,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError> {
        let node_digest = external_node.digest();

        let mast_forest = host
            .get_mast_forest(&node_digest)
            .ok_or(ExecutionError::no_mast_forest_with_procedure(node_digest, &()))?;

        // We limit the parts of the program that can be called externally to procedure
        // roots, even though MAST doesn't have that restriction.
        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, &()))?;

        // if the node that we got by looking up an external reference is also an External
        // node, we are about to enter into an infinite loop - so, return an error
        if mast_forest[root_id].is_external() {
            return Err(ExecutionError::CircularExternalNode(node_digest));
        }

        // Merge the advice map of this forest into the advice provider.
        // Note that the map may be merged multiple times if a different procedure from the same
        // forest is called.
        // For now, only compiled libraries contain non-empty advice maps, so for most cases,
        // this call will be cheap.
        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

        Ok((root_id, mast_forest))
    }

    // PUBLIC ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns a reference to the kernel available to this process.
    pub const fn kernel(&self) -> &Kernel {
        self.chiplets.kernel_rom.kernel()
    }

    /// Consumes the process and returns its constituent components.
    ///
    /// Note that the advice provider and the execution-option fields (`max_cycles`,
    /// `enable_tracing`) are not returned and are dropped.
    pub fn into_parts(
        self,
    ) -> (System, Decoder, Stack, RangeChecker, Chiplets, PrecompileTranscriptState) {
        (
            self.system,
            self.decoder,
            self.stack,
            self.range,
            self.chiplets,
            self.pc_transcript_state,
        )
    }
}
753
/// A borrowed view into the components of a [Process] which are exposed to host callbacks.
#[derive(Debug)]
pub struct SlowProcessState<'a> {
    // advice provider is mutable so that host callbacks can inject advice data
    advice: &'a mut AdviceProvider,
    system: &'a System,
    stack: &'a Stack,
    chiplets: &'a Chiplets,
}
761
762// PROCESS STATE
763// ================================================================================================
764
/// A view of the state of a running processor, exposed to hosts (e.g., in debug, trace, and event
/// callbacks).
#[derive(Debug)]
pub enum ProcessState<'a> {
    /// State of the trace-generating (slow) processor.
    Slow(SlowProcessState<'a>),
    /// State of the fast processor.
    Fast(FastProcessState<'a>),
    /// A process state that does nothing. Calling any of its methods results in a panic. It is
    /// expected to be used in conjunction with the `NoopHost`.
    Noop(()),
}
773
774impl Process {
775    #[inline(always)]
776    pub fn state(&mut self) -> ProcessState<'_> {
777        ProcessState::Slow(SlowProcessState {
778            advice: &mut self.advice,
779            system: &self.system,
780            stack: &self.stack,
781            chiplets: &self.chiplets,
782        })
783    }
784}
785
786impl<'a> ProcessState<'a> {
787    /// Returns a reference to the advice provider.
788    #[inline(always)]
789    pub fn advice_provider(&self) -> &AdviceProvider {
790        match self {
791            ProcessState::Slow(state) => state.advice,
792            ProcessState::Fast(state) => &state.processor.advice,
793            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
794        }
795    }
796
797    /// Returns a mutable reference to the advice provider.
798    #[inline(always)]
799    pub fn advice_provider_mut(&mut self) -> &mut AdviceProvider {
800        match self {
801            ProcessState::Slow(state) => state.advice,
802            ProcessState::Fast(state) => &mut state.processor.advice,
803            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
804        }
805    }
806
807    /// Returns the current clock cycle of a process.
808    #[inline(always)]
809    pub fn clk(&self) -> RowIndex {
810        match self {
811            ProcessState::Slow(state) => state.system.clk(),
812            ProcessState::Fast(state) => state.processor.clk,
813            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
814        }
815    }
816
817    /// Returns the current execution context ID.
818    #[inline(always)]
819    pub fn ctx(&self) -> ContextId {
820        match self {
821            ProcessState::Slow(state) => state.system.ctx(),
822            ProcessState::Fast(state) => state.processor.ctx,
823            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
824        }
825    }
826
827    /// Returns the value located at the specified position on the stack at the current clock cycle.
828    ///
829    /// This method can access elements beyond the top 16 positions by using the overflow table.
830    #[inline(always)]
831    pub fn get_stack_item(&self, pos: usize) -> Felt {
832        match self {
833            ProcessState::Slow(state) => state.stack.get(pos),
834            ProcessState::Fast(state) => state.processor.stack_get(pos),
835            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
836        }
837    }
838
839    /// Returns a word starting at the specified element index on the stack in big-endian
840    /// (reversed) order.
841    ///
842    /// The word is formed by taking 4 consecutive elements starting from the specified index.
843    /// For example, start_idx=0 creates a word from stack elements 0-3, start_idx=1 creates
844    /// a word from elements 1-4, etc.
845    ///
846    /// In big-endian order, stack element N+3 will be at position 0 of the word, N+2 at
847    /// position 1, N+1 at position 2, and N at position 3. This matches the behavior of
848    /// `mem_loadw_be` where `mem[a+3]` ends up on top of the stack.
849    ///
850    /// This method can access elements beyond the top 16 positions by using the overflow table.
851    /// Creating a word does not change the state of the stack.
852    #[inline(always)]
853    pub fn get_stack_word_be(&self, start_idx: usize) -> Word {
854        match self {
855            ProcessState::Slow(state) => state.stack.get_word(start_idx),
856            ProcessState::Fast(state) => state.processor.stack_get_word(start_idx),
857            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
858        }
859    }
860
861    /// Returns a word starting at the specified element index on the stack in little-endian
862    /// (memory) order.
863    ///
864    /// The word is formed by taking 4 consecutive elements starting from the specified index.
865    /// For example, start_idx=0 creates a word from stack elements 0-3, start_idx=1 creates
866    /// a word from elements 1-4, etc.
867    ///
868    /// In little-endian order, stack element N will be at position 0 of the word, N+1 at
869    /// position 1, N+2 at position 2, and N+3 at position 3. This matches the behavior of
870    /// `mem_loadw_le` where `mem[a]` ends up on top of the stack.
871    ///
872    /// This method can access elements beyond the top 16 positions by using the overflow table.
873    /// Creating a word does not change the state of the stack.
874    #[inline(always)]
875    pub fn get_stack_word_le(&self, start_idx: usize) -> Word {
876        let mut word = self.get_stack_word_be(start_idx);
877        word.reverse();
878        word
879    }
880
881    /// Returns a word starting at the specified element index on the stack.
882    ///
883    /// This is an alias for [`Self::get_stack_word_be`] for backward compatibility. For new code,
884    /// prefer using the explicit `get_stack_word_be()` or `get_stack_word_le()` to make the
885    /// ordering expectations clear.
886    ///
887    /// See [`Self::get_stack_word_be`] for detailed documentation.
888    #[deprecated(
889        since = "0.19.0",
890        note = "Use `get_stack_word_be()` or `get_stack_word_le()` to make endianness explicit"
891    )]
892    #[inline(always)]
893    pub fn get_stack_word(&self, start_idx: usize) -> Word {
894        self.get_stack_word_be(start_idx)
895    }
896
897    /// Returns stack state at the current clock cycle. This includes the top 16 items of the
898    /// stack + overflow entries.
899    #[inline(always)]
900    pub fn get_stack_state(&self) -> Vec<Felt> {
901        match self {
902            ProcessState::Slow(state) => state.stack.get_state_at(state.system.clk()),
903            ProcessState::Fast(state) => state.processor.stack().iter().rev().copied().collect(),
904            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
905        }
906    }
907
908    /// Returns the element located at the specified context/address, or None if the address hasn't
909    /// been accessed previously.
910    #[inline(always)]
911    pub fn get_mem_value(&self, ctx: ContextId, addr: u32) -> Option<Felt> {
912        match self {
913            ProcessState::Slow(state) => state.chiplets.memory.get_value(ctx, addr),
914            ProcessState::Fast(state) => state.processor.memory.read_element_impl(ctx, addr),
915            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
916        }
917    }
918
919    /// Returns the batch of elements starting at the specified context/address.
920    ///
921    /// # Errors
922    /// - If the address is not word aligned.
923    #[inline(always)]
924    pub fn get_mem_word(&self, ctx: ContextId, addr: u32) -> Result<Option<Word>, MemoryError> {
925        match self {
926            ProcessState::Slow(state) => state.chiplets.memory.get_word(ctx, addr),
927            ProcessState::Fast(state) => {
928                state.processor.memory.read_word_impl(ctx, addr, None, &())
929            },
930            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
931        }
932    }
933
934    /// Reads (start_addr, end_addr) tuple from the specified elements of the operand stack (
935    /// without modifying the state of the stack), and verifies that memory range is valid.
936    pub fn get_mem_addr_range(
937        &self,
938        start_idx: usize,
939        end_idx: usize,
940    ) -> Result<core::ops::Range<u32>, MemoryError> {
941        let start_addr = self.get_stack_item(start_idx).as_int();
942        let end_addr = self.get_stack_item(end_idx).as_int();
943
944        if start_addr > u32::MAX as u64 {
945            return Err(MemoryError::address_out_of_bounds(start_addr, &()));
946        }
947        if end_addr > u32::MAX as u64 {
948            return Err(MemoryError::address_out_of_bounds(end_addr, &()));
949        }
950
951        if start_addr > end_addr {
952            return Err(MemoryError::InvalidMemoryRange { start_addr, end_addr });
953        }
954
955        Ok(start_addr as u32..end_addr as u32)
956    }
957
958    /// Returns the entire memory state for the specified execution context at the current clock
959    /// cycle.
960    ///
961    /// The state is returned as a vector of (address, value) tuples, and includes addresses which
962    /// have been accessed at least once.
963    #[inline(always)]
964    pub fn get_mem_state(&self, ctx: ContextId) -> Vec<(MemoryAddress, Felt)> {
965        match self {
966            ProcessState::Slow(state) => {
967                state.chiplets.memory.get_state_at(ctx, state.system.clk())
968            },
969            ProcessState::Fast(state) => state.processor.memory.get_memory_state(ctx),
970            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
971        }
972    }
973}
974
975impl<'a> From<&'a mut Process> for ProcessState<'a> {
976    fn from(process: &'a mut Process) -> Self {
977        process.state()
978    }
979}
980
981// HELPERS
982// ================================================================================================
983
984/// For errors generated from processing an `ExternalNode`, returns the same error except with
985/// proper error context.
986pub(crate) fn add_error_ctx_to_external_error(
987    result: Result<(), ExecutionError>,
988    err_ctx: impl ErrorContext,
989) -> Result<(), ExecutionError> {
990    match result {
991        Ok(_) => Ok(()),
992        // Add context information to any errors coming from executing an `ExternalNode`
993        Err(err) => match err {
994            ExecutionError::NoMastForestWithProcedure { label, source_file: _, root_digest }
995            | ExecutionError::MalformedMastForestInHost { label, source_file: _, root_digest } => {
996                if label == SourceSpan::UNKNOWN {
997                    let err_with_ctx =
998                        ExecutionError::no_mast_forest_with_procedure(root_digest, &err_ctx);
999                    Err(err_with_ctx)
1000                } else {
1001                    // If the source span was already populated, just return the error as-is. This
1002                    // would occur when a call deeper down the call stack was responsible for the
1003                    // error.
1004                    Err(err)
1005                }
1006            },
1007
1008            _ => {
1009                // do nothing
1010                Err(err)
1011            },
1012        },
1013    }
1014}