miden_processor/
lib.rs

1#![no_std]
2
3#[macro_use]
4extern crate alloc;
5
6#[cfg(feature = "std")]
7extern crate std;
8
9use alloc::{sync::Arc, vec::Vec};
10use core::fmt::{Display, LowerHex};
11
12use miden_air::trace::{
13    CHIPLETS_WIDTH, DECODER_TRACE_WIDTH, MIN_TRACE_LEN, RANGE_CHECK_TRACE_WIDTH, STACK_TRACE_WIDTH,
14    SYS_TRACE_WIDTH,
15};
16pub use miden_air::{ExecutionOptions, ExecutionOptionsError, RowIndex};
17pub use miden_core::{
18    AssemblyOp, EMPTY_WORD, Felt, Kernel, ONE, Operation, Program, ProgramInfo, QuadExtension,
19    StackInputs, StackOutputs, WORD_SIZE, Word, ZERO,
20    crypto::merkle::SMT_DEPTH,
21    errors::InputError,
22    mast::{MastForest, MastNode, MastNodeExt, MastNodeId},
23    sys_events::SystemEvent,
24    utils::DeserializationError,
25};
26use miden_core::{
27    Decorator, DecoratorIdIterator, FieldElement,
28    mast::{
29        BasicBlockNode, CallNode, DynNode, ExternalNode, JoinNode, LoopNode, OpBatch, SplitNode,
30    },
31};
32use miden_debug_types::SourceSpan;
33pub use winter_prover::matrix::ColMatrix;
34
35pub(crate) mod continuation_stack;
36
37pub mod fast;
38use fast::FastProcessState;
39pub(crate) mod processor;
40
41mod operations;
42
43mod system;
44use system::System;
45pub use system::{ContextId, FMP_MIN, SYSCALL_FMP_MIN};
46
47pub(crate) mod decoder;
48use decoder::Decoder;
49
50mod stack;
51use stack::Stack;
52
53mod range;
54use range::RangeChecker;
55
56mod host;
57pub use host::{
58    AdviceMutation, AsyncHost, BaseHost, FutureMaybeSend, MastForestStore, MemMastForestStore,
59    SyncHost,
60    advice::{AdviceError, AdviceInputs, AdviceProvider},
61    default::{DefaultDebugHandler, DefaultHost, HostLibrary},
62    handlers::{DebugHandler, EventError, EventHandler, EventHandlerRegistry, NoopEventHandler},
63};
64
65mod chiplets;
66use chiplets::Chiplets;
67pub use chiplets::MemoryError;
68
69mod trace;
70use trace::TraceFragment;
71pub use trace::{ChipletsLengths, ExecutionTrace, NUM_RAND_ROWS, TraceLenSummary};
72
73mod errors;
74pub use errors::{ErrorContext, ErrorContextImpl, ExecutionError};
75
76pub mod utils;
77
78#[cfg(all(test, not(feature = "no_err_ctx")))]
79mod tests;
80
81mod debug;
82pub use debug::{AsmOpInfo, VmState, VmStateIterator};
83
84// RE-EXPORTS
85// ================================================================================================
86
/// Mathematical primitives (field elements and FFT routines) re-exported for downstream crates.
pub mod math {
    pub use miden_core::{Felt, FieldElement, StarkField};
    pub use winter_prover::math::fft;
}
91
/// Cryptographic primitives (hashers, Merkle structures, and random coins) re-exported for
/// downstream crates.
pub mod crypto {
    pub use miden_core::crypto::{
        hash::{Blake3_192, Blake3_256, ElementHasher, Hasher, Poseidon2, Rpo256, Rpx256},
        merkle::{
            MerkleError, MerklePath, MerkleStore, MerkleTree, NodeIndex, PartialMerkleTree,
            SimpleSmt,
        },
        random::{RandomCoin, RpoRandomCoin, RpxRandomCoin, WinterRandomCoin},
    };
}
102
// TYPE ALIASES
// ================================================================================================

/// A VM memory address, wrapped in a newtype so that it cannot be confused with other `u32`
/// values.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct MemoryAddress(u32);

impl From<u32> for MemoryAddress {
    fn from(addr: u32) -> Self {
        MemoryAddress(addr)
    }
}

impl From<MemoryAddress> for u32 {
    fn from(value: MemoryAddress) -> Self {
        value.0
    }
}

impl Display for MemoryAddress {
    // formats the address exactly as the underlying u32 would be formatted (flags are forwarded)
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        Display::fmt(&self.0, f)
    }
}

impl LowerHex for MemoryAddress {
    // formats the address as lowercase hex by delegating to the underlying u32
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        LowerHex::fmt(&self.0, f)
    }
}

impl core::ops::Add<MemoryAddress> for MemoryAddress {
    type Output = Self;

    // NOTE: `+` panics on overflow in debug builds and wraps in release builds
    fn add(self, rhs: MemoryAddress) -> Self::Output {
        MemoryAddress(self.0 + rhs.0)
    }
}

impl core::ops::Add<u32> for MemoryAddress {
    type Output = Self;

    // NOTE: `+` panics on overflow in debug builds and wraps in release builds
    fn add(self, rhs: u32) -> Self::Output {
        MemoryAddress(self.0 + rhs)
    }
}

/// Columns of the system trace (one column vector of field elements per system trace column).
type SysTrace = [Vec<Felt>; SYS_TRACE_WIDTH];
150
/// Main trace columns and auxiliary-trace builder for the decoder component.
pub struct DecoderTrace {
    trace: [Vec<Felt>; DECODER_TRACE_WIDTH],
    aux_builder: decoder::AuxTraceBuilder,
}

/// Main trace columns for the stack component.
pub struct StackTrace {
    trace: [Vec<Felt>; STACK_TRACE_WIDTH],
}

/// Main trace columns and auxiliary-trace builder for the range checker component.
pub struct RangeCheckTrace {
    trace: [Vec<Felt>; RANGE_CHECK_TRACE_WIDTH],
    aux_builder: range::AuxTraceBuilder,
}

/// Main trace columns and auxiliary-trace builder for the chiplets component.
pub struct ChipletsTrace {
    trace: [Vec<Felt>; CHIPLETS_WIDTH],
    aux_builder: chiplets::AuxTraceBuilder,
}
169
170// EXECUTORS
171// ================================================================================================
172
173/// Returns an execution trace resulting from executing the provided program against the provided
174/// inputs.
175///
176/// The `host` parameter is used to provide the external environment to the program being executed,
177/// such as access to the advice provider and libraries that the program depends on.
178#[tracing::instrument("execute_program", skip_all)]
179pub fn execute(
180    program: &Program,
181    stack_inputs: StackInputs,
182    advice_inputs: AdviceInputs,
183    host: &mut impl SyncHost,
184    options: ExecutionOptions,
185) -> Result<ExecutionTrace, ExecutionError> {
186    let mut process = Process::new(program.kernel().clone(), stack_inputs, advice_inputs, options);
187    let stack_outputs = process.execute(program, host)?;
188    let trace = ExecutionTrace::new(process, stack_outputs);
189    assert_eq!(&program.hash(), trace.program_hash(), "inconsistent program hash");
190    Ok(trace)
191}
192
193/// Returns an iterator which allows callers to step through the execution and inspect VM state at
194/// each execution step.
195pub fn execute_iter(
196    program: &Program,
197    stack_inputs: StackInputs,
198    advice_inputs: AdviceInputs,
199    host: &mut impl SyncHost,
200) -> VmStateIterator {
201    let mut process = Process::new_debug(program.kernel().clone(), stack_inputs, advice_inputs);
202    let result = process.execute(program, host);
203    if result.is_ok() {
204        assert_eq!(
205            program.hash(),
206            process.decoder.program_hash().into(),
207            "inconsistent program hash"
208        );
209    }
210    VmStateIterator::new(process, result)
211}
212
// PROCESS
// ================================================================================================

/// A [Process] is the underlying execution engine for a Miden [Program].
///
/// Typically, you do not need to worry about, or use [Process] directly, instead you should prefer
/// to use either [execute] or [execute_iter], which also handle setting up the process state,
/// inputs, as well as compute the [ExecutionTrace] for the program.
///
/// However, for situations in which you want finer-grained control over those steps, you will need
/// to construct an instance of [Process] using [Process::new], invoke [Process::execute], and then
/// get the execution trace using [ExecutionTrace::new] using the outputs produced by execution.
#[cfg(not(any(test, feature = "testing")))]
pub struct Process {
    // provider of non-deterministic (advice) inputs
    advice: AdviceProvider,
    // system state (clock, execution context, free-memory pointer)
    system: System,
    // decoder component; also tracks debug-mode information
    decoder: Decoder,
    // operand stack
    stack: Stack,
    // range checker component
    range: RangeChecker,
    // chiplets, including the kernel ROM initialized from the program's kernel
    chiplets: Chiplets,
    // maximum number of cycles the process is allowed to execute
    max_cycles: u32,
    // when true, trace decorators are forwarded to the host
    enable_tracing: bool,
}
236
/// Testing variant of [Process] in which all components are publicly accessible so that tests
/// (and crates enabling the `testing` feature) can inspect and manipulate internal state
/// directly. Otherwise identical to the non-testing definition.
#[cfg(any(test, feature = "testing"))]
pub struct Process {
    pub advice: AdviceProvider,
    pub system: System,
    pub decoder: Decoder,
    pub stack: Stack,
    pub range: RangeChecker,
    pub chiplets: Chiplets,
    pub max_cycles: u32,
    pub enable_tracing: bool,
}
248
249impl Process {
250    // CONSTRUCTORS
251    // --------------------------------------------------------------------------------------------
252    /// Creates a new process with the provided inputs.
253    pub fn new(
254        kernel: Kernel,
255        stack_inputs: StackInputs,
256        advice_inputs: AdviceInputs,
257        execution_options: ExecutionOptions,
258    ) -> Self {
259        Self::initialize(kernel, stack_inputs, advice_inputs, execution_options)
260    }
261
    /// Creates a new process with provided inputs and debug options enabled.
    ///
    /// Equivalent to [Process::new] with default execution options, except that both tracing and
    /// debugging are turned on.
    pub fn new_debug(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
    ) -> Self {
        Self::initialize(
            kernel,
            stack_inputs,
            advice_inputs,
            ExecutionOptions::default().with_tracing().with_debugging(true),
        )
    }
275
276    fn initialize(
277        kernel: Kernel,
278        stack: StackInputs,
279        advice_inputs: AdviceInputs,
280        execution_options: ExecutionOptions,
281    ) -> Self {
282        let in_debug_mode = execution_options.enable_debugging();
283        Self {
284            advice: advice_inputs.into(),
285            system: System::new(execution_options.expected_cycles() as usize),
286            decoder: Decoder::new(in_debug_mode),
287            stack: Stack::new(&stack, execution_options.expected_cycles() as usize, in_debug_mode),
288            range: RangeChecker::new(),
289            chiplets: Chiplets::new(kernel),
290            max_cycles: execution_options.max_cycles(),
291            enable_tracing: execution_options.enable_tracing(),
292        }
293    }
294
    // PROGRAM EXECUTOR
    // --------------------------------------------------------------------------------------------

    /// Executes the provided [`Program`] in this process.
    ///
    /// Returns the state of the stack at the end of execution, or an error if execution fails.
    /// A process can execute only one program; attempting to reuse it returns
    /// [ExecutionError::ProgramAlreadyExecuted].
    pub fn execute(
        &mut self,
        program: &Program,
        host: &mut impl SyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // a non-zero clock indicates that this process has already executed a program
        if self.system.clk() != 0 {
            return Err(ExecutionError::ProgramAlreadyExecuted);
        }

        // make the program's advice map available to the advice provider before execution begins
        self.advice
            .extend_map(program.mast_forest().advice_map())
            .map_err(|err| ExecutionError::advice_error(err, RowIndex::from(0), &()))?;

        // execute the program starting at its entrypoint
        self.execute_mast_node(program.entrypoint(), &program.mast_forest().clone(), host)?;

        self.stack.build_stack_outputs()
    }
316
    // NODE EXECUTORS
    // --------------------------------------------------------------------------------------------

    /// Executes the MAST node with the specified ID from the provided forest, dispatching to the
    /// executor for the node's type.
    ///
    /// Decorators attached to the node are executed before entering and after exiting the node.
    fn execute_mast_node(
        &mut self,
        node_id: MastNodeId,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let node = program
            .get_node_by_id(node_id)
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id })?;

        // execute all "before enter" decorators attached to this node
        for &decorator_id in node.before_enter() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        match node {
            MastNode::Block(node) => self.execute_basic_block_node(node, program, host)?,
            MastNode::Join(node) => self.execute_join_node(node, program, host)?,
            MastNode::Split(node) => self.execute_split_node(node, program, host)?,
            MastNode::Loop(node) => self.execute_loop_node(node, program, host)?,
            MastNode::Call(node) => {
                // attach error context so that failures inside the call can be reported with
                // source information
                let err_ctx = err_ctx!(program, node, host);
                add_error_ctx_to_external_error(
                    self.execute_call_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::Dyn(node) => {
                let err_ctx = err_ctx!(program, node, host);
                add_error_ctx_to_external_error(
                    self.execute_dyn_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::External(external_node) => {
                // external nodes reference procedures living in a different MAST forest, which is
                // looked up via the host
                let (root_id, mast_forest) = self.resolve_external_node(external_node, host)?;

                self.execute_mast_node(root_id, &mast_forest, host)?;
            },
        }

        // execute all "after exit" decorators attached to this node
        for &decorator_id in node.after_exit() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        Ok(())
    }
366
    /// Executes the specified [JoinNode].
    ///
    /// A JOIN node executes its two children sequentially.
    #[inline(always)]
    fn execute_join_node(
        &mut self,
        node: &JoinNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_join_node(node, program, host)?;

        // execute first and then second child of the join block
        self.execute_mast_node(node.first(), program, host)?;
        self.execute_mast_node(node.second(), program, host)?;

        self.end_join_node(node, program, host)
    }
383
384    /// Executes the specified [SplitNode].
385    #[inline(always)]
386    fn execute_split_node(
387        &mut self,
388        node: &SplitNode,
389        program: &MastForest,
390        host: &mut impl SyncHost,
391    ) -> Result<(), ExecutionError> {
392        // start the SPLIT block; this also pops the stack and returns the popped element
393        let condition = self.start_split_node(node, program, host)?;
394
395        // execute either the true or the false branch of the split block based on the condition
396        if condition == ONE {
397            self.execute_mast_node(node.on_true(), program, host)?;
398        } else if condition == ZERO {
399            self.execute_mast_node(node.on_false(), program, host)?;
400        } else {
401            let err_ctx = err_ctx!(program, node, host);
402            return Err(ExecutionError::not_binary_value_if(condition, &err_ctx));
403        }
404
405        self.end_split_node(node, program, host)
406    }
407
    /// Executes the specified [LoopNode].
    ///
    /// The loop condition is popped from the stack when the LOOP block starts, and again by the
    /// DROP executed before each subsequent iteration; the condition must always be binary.
    #[inline(always)]
    fn execute_loop_node(
        &mut self,
        node: &LoopNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // start the LOOP block; this also pops the stack and returns the popped element
        let condition = self.start_loop_node(node, program, host)?;

        // if the top of the stack is ONE, execute the loop body; otherwise skip the loop body
        if condition == ONE {
            // execute the loop body at least once
            self.execute_mast_node(node.body(), program, host)?;

            // keep executing the loop body until the condition on the top of the stack is no
            // longer ONE; each iteration of the loop is preceded by executing REPEAT operation
            // which drops the condition from the stack
            while self.stack.peek() == ONE {
                self.decoder.repeat();
                self.execute_op(Operation::Drop, program, host)?;
                self.execute_mast_node(node.body(), program, host)?;
            }

            // after the loop exits, the condition remaining on the stack must be ZERO
            if self.stack.peek() != ZERO {
                let err_ctx = err_ctx!(program, node, host);
                return Err(ExecutionError::not_binary_value_loop(self.stack.peek(), &err_ctx));
            }

            // end the LOOP block and drop the condition from the stack
            self.end_loop_node(node, true, program, host)
        } else if condition == ZERO {
            // end the LOOP block, but don't drop the condition from the stack because it was
            // already dropped when we started the LOOP block
            self.end_loop_node(node, false, program, host)
        } else {
            let err_ctx = err_ctx!(program, node, host);
            Err(ExecutionError::not_binary_value_loop(condition, &err_ctx))
        }
    }
449
    /// Executes the specified [CallNode].
    ///
    /// Neither `call` nor `syscall` is allowed while already inside a syscall, and a syscall
    /// target must be a procedure present in the kernel ROM.
    #[inline(always)]
    fn execute_call_node(
        &mut self,
        call_node: &CallNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // call or syscall are not allowed inside a syscall
        if self.system.in_syscall() {
            let instruction = if call_node.is_syscall() { "syscall" } else { "call" };
            return Err(ExecutionError::CallInSyscall(instruction));
        }

        // if this is a syscall, make sure the call target exists in the kernel
        if call_node.is_syscall() {
            let callee = program.get_node_by_id(call_node.callee()).ok_or_else(|| {
                ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() }
            })?;
            let err_ctx = err_ctx!(program, call_node, host);
            self.chiplets.kernel_rom.access_proc(callee.digest(), &err_ctx)?;
        }
        let err_ctx = err_ctx!(program, call_node, host);

        // start the CALL block, execute the callee, then end the CALL block
        self.start_call_node(call_node, program, host)?;
        self.execute_mast_node(call_node.callee(), program, host)?;
        self.end_call_node(call_node, program, host, &err_ctx)
    }
478
    /// Executes the specified [miden_core::mast::DynNode].
    ///
    /// The MAST root of the callee is assumed to be at the top of the stack, and the callee is
    /// expected to be either in the current `program` or in the host. A `dyncall` is disallowed
    /// inside a syscall.
    #[inline(always)]
    fn execute_dyn_node(
        &mut self,
        node: &DynNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // dyn calls are not allowed inside a syscall
        if node.is_dyncall() && self.system.in_syscall() {
            return Err(ExecutionError::CallInSyscall("dyncall"));
        }

        let err_ctx = err_ctx!(program, node, host);

        // starting the node pops the callee hash from the stack; dyncall and dyn are started
        // differently
        let callee_hash = if node.is_dyncall() {
            self.start_dyncall_node(node, &err_ctx)?
        } else {
            self.start_dyn_node(node, program, host, &err_ctx)?
        };

        // if the callee is not in the program's MAST forest, try to find a MAST forest for it in
        // the host (corresponding to an external library loaded in the host); if none are
        // found, return an error.
        match program.find_procedure_root(callee_hash) {
            Some(callee_id) => self.execute_mast_node(callee_id, program, host)?,
            None => {
                let mast_forest = host
                    .get_mast_forest(&callee_hash)
                    .ok_or_else(|| ExecutionError::dynamic_node_not_found(callee_hash, &err_ctx))?;

                // We limit the parts of the program that can be called externally to procedure
                // roots, even though MAST doesn't have that restriction.
                let root_id = mast_forest
                    .find_procedure_root(callee_hash)
                    .ok_or(ExecutionError::malfored_mast_forest_in_host(callee_hash, &()))?;

                // Merge the advice map of this forest into the advice provider.
                // Note that the map may be merged multiple times if a different procedure from the
                // same forest is called.
                // For now, only compiled libraries contain non-empty advice maps, so for most
                // cases, this call will be cheap.
                self.advice
                    .extend_map(mast_forest.advice_map())
                    .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

                self.execute_mast_node(root_id, &mast_forest, host)?
            },
        }

        // dyncall and dyn nodes are also ended differently
        if node.is_dyncall() {
            self.end_dyncall_node(node, program, host, &err_ctx)
        } else {
            self.end_dyn_node(node, program, host)
        }
    }
538
    /// Executes the specified [BasicBlockNode].
    ///
    /// Operation batches in the block are executed in order, with each additional batch preceded
    /// by a RESPAN; decorators not consumed during batch execution are executed after the block
    /// is closed.
    #[inline(always)]
    fn execute_basic_block_node(
        &mut self,
        basic_block: &BasicBlockNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_basic_block_node(basic_block, program, host)?;

        // op_offset tracks the index of the first operation of the current batch within the
        // whole basic block; decorator positions are indexed block-wide
        let mut op_offset = 0;
        let mut decorator_ids = basic_block.decorator_iter();

        // execute the first operation batch
        self.execute_op_batch(
            basic_block,
            &basic_block.op_batches()[0],
            &mut decorator_ids,
            op_offset,
            program,
            host,
        )?;
        op_offset += basic_block.op_batches()[0].ops().len();

        // if the span contains more operation batches, execute them. each additional batch is
        // preceded by a RESPAN operation; executing RESPAN operation does not change the state
        // of the stack
        for op_batch in basic_block.op_batches().iter().skip(1) {
            self.respan(op_batch);
            self.execute_op(Operation::Noop, program, host)?;
            self.execute_op_batch(
                basic_block,
                op_batch,
                &mut decorator_ids,
                op_offset,
                program,
                host,
            )?;
            op_offset += op_batch.ops().len();
        }

        self.end_basic_block_node(basic_block, program, host)?;

        // execute any decorators which have not been executed during span ops execution; this
        // can happen for decorators appearing after all operations in a block. these decorators
        // are executed after BASIC BLOCK is closed to make sure the VM clock cycle advances beyond
        // the last clock cycle of the BASIC BLOCK ops.
        for &decorator_id in decorator_ids {
            let decorator = program
                .get_decorator_by_id(decorator_id)
                .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
            self.execute_decorator(decorator, host)?;
        }

        Ok(())
    }
595
    /// Executes all operations in an [OpBatch]. This also ensures that all alignment rules are
    /// satisfied by executing NOOPs as needed. Specifically:
    /// - If an operation group ends with an operation carrying an immediate value, a NOOP is
    ///   executed after it.
    /// - If the number of groups in a batch is not a power of 2, NOOPs are executed (one per group)
    ///   to bring it up to the next power of two (e.g., 3 -> 4, 5 -> 8).
    ///
    /// `op_offset` is the index of this batch's first operation within the basic block; it is used
    /// to match decorators (which are indexed block-wide) to operations in this batch.
    #[inline(always)]
    fn execute_op_batch(
        &mut self,
        basic_block: &BasicBlockNode,
        batch: &OpBatch,
        decorators: &mut DecoratorIdIterator,
        op_offset: usize,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let end_indices = batch.end_indices();
        // op_idx is the index of the current operation within its group; group_idx identifies the
        // group currently being decoded, and next_group_idx the next group to decode
        let mut op_idx = 0;
        let mut group_idx = 0;
        let mut next_group_idx = 1;

        // round up the number of groups to be processed to the next power of two; we do this
        // because the processor requires the number of groups to be either 1, 2, 4, or 8; if
        // the actual number of groups is smaller, we'll pad the batch with NOOPs at the end
        let num_batch_groups = batch.num_groups().next_power_of_two();

        // execute operations in the batch one by one
        for (i, &op) in batch.ops().iter().enumerate() {
            // first, execute any decorators attached to this operation's block-wide position
            while let Some(&decorator_id) = decorators.next_filtered(i + op_offset) {
                let decorator = program
                    .get_decorator_by_id(decorator_id)
                    .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
                self.execute_decorator(decorator, host)?;
            }

            // decode and execute the operation
            let err_ctx = err_ctx!(program, basic_block, host, i + op_offset);
            self.decoder.execute_user_op(op, op_idx);
            self.execute_op_with_error_ctx(op, program, host, &err_ctx)?;

            // if the operation carries an immediate value, the value is stored at the next group
            // pointer; so, we advance the pointer to the following group
            let has_imm = op.imm_value().is_some();
            if has_imm {
                next_group_idx += 1;
            }

            // determine if we've executed all non-decorator operations in a group
            if i + 1 == end_indices[group_idx] {
                // move to the next group and reset operation index
                group_idx = next_group_idx;
                next_group_idx += 1;
                op_idx = 0;

                // if we haven't reached the end of the batch yet, set up the decoder for
                // decoding the next operation group
                if group_idx < num_batch_groups {
                    self.decoder.start_op_group(batch.groups()[group_idx]);
                }
            } else {
                // if we are not at the end of the group, just increment the operation index
                op_idx += 1;
            }
        }

        Ok(())
    }
663
664    /// Executes the specified decorator
665    fn execute_decorator(
666        &mut self,
667        decorator: &Decorator,
668        host: &mut impl SyncHost,
669    ) -> Result<(), ExecutionError> {
670        match decorator {
671            Decorator::Debug(options) => {
672                if self.decoder.in_debug_mode() {
673                    let process = &mut self.state();
674                    host.on_debug(process, options)?;
675                }
676            },
677            Decorator::AsmOp(assembly_op) => {
678                if self.decoder.in_debug_mode() {
679                    self.decoder.append_asmop(self.system.clk(), assembly_op.clone());
680                }
681            },
682            Decorator::Trace(id) => {
683                if self.enable_tracing {
684                    let process = &mut self.state();
685                    host.on_trace(process, *id)?;
686                }
687            },
688        };
689        Ok(())
690    }
691
    /// Resolves an external node reference to a procedure root using the [`MastForest`] store in
    /// the provided host.
    ///
    /// The [`MastForest`] for the procedure is cached to avoid additional queries to the host.
    ///
    /// Returns an error if no forest containing the procedure is known to the host, if the
    /// procedure is not a root of the returned forest, or if resolution would recurse into
    /// another external node (a cycle).
    fn resolve_external_node(
        &mut self,
        external_node: &ExternalNode,
        host: &impl SyncHost,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError> {
        let node_digest = external_node.digest();

        let mast_forest = host
            .get_mast_forest(&node_digest)
            .ok_or(ExecutionError::no_mast_forest_with_procedure(node_digest, &()))?;

        // We limit the parts of the program that can be called externally to procedure
        // roots, even though MAST doesn't have that restriction.
        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, &()))?;

        // if the node that we got by looking up an external reference is also an External
        // node, we are about to enter into an infinite loop - so, return an error
        if mast_forest[root_id].is_external() {
            return Err(ExecutionError::CircularExternalNode(node_digest));
        }

        // Merge the advice map of this forest into the advice provider.
        // Note that the map may be merged multiple times if a different procedure from the same
        // forest is called.
        // For now, only compiled libraries contain non-empty advice maps, so for most cases,
        // this call will be cheap.
        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

        Ok((root_id, mast_forest))
    }
730
    // PUBLIC ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns the kernel loaded into this process's kernel ROM.
    pub const fn kernel(&self) -> &Kernel {
        self.chiplets.kernel_rom.kernel()
    }

    /// Consumes the process and returns its constituent components.
    pub fn into_parts(self) -> (System, Decoder, Stack, RangeChecker, Chiplets) {
        (self.system, self.decoder, self.stack, self.range, self.chiplets)
    }
741}
742
/// Borrowed view into a [Process]'s components; passed to host callbacks (debug and trace
/// handlers) while the slow processor is executing.
#[derive(Debug)]
pub struct SlowProcessState<'a> {
    advice: &'a mut AdviceProvider,
    system: &'a System,
    stack: &'a Stack,
    chiplets: &'a Chiplets,
}
750
// PROCESS STATE
// ================================================================================================

/// A view into the state of a running processor, abstracting over the slow ([Process]) and fast
/// processor implementations so that hosts can be written against a single interface.
#[derive(Debug)]
pub enum ProcessState<'a> {
    Slow(SlowProcessState<'a>),
    Fast(FastProcessState<'a>),
    /// A process state that does nothing. Calling any of its methods results in a panic. It is
    /// expected to be used in conjunction with the `NoopHost`.
    Noop(()),
}
762
impl Process {
    /// Returns a [ProcessState] view over this (slow) process, suitable for passing to host
    /// callbacks.
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Slow(SlowProcessState {
            advice: &mut self.advice,
            system: &self.system,
            stack: &self.stack,
            chiplets: &self.chiplets,
        })
    }
}
774
775impl<'a> ProcessState<'a> {
    /// Returns a reference to the advice provider.
    ///
    /// # Panics
    /// Panics if called on a [ProcessState::Noop] state.
    #[inline(always)]
    pub fn advice_provider(&self) -> &AdviceProvider {
        match self {
            ProcessState::Slow(state) => state.advice,
            ProcessState::Fast(state) => &state.processor.advice,
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

    /// Returns a mutable reference to the advice provider.
    ///
    /// # Panics
    /// Panics if called on a [ProcessState::Noop] state.
    #[inline(always)]
    pub fn advice_provider_mut(&mut self) -> &mut AdviceProvider {
        match self {
            ProcessState::Slow(state) => state.advice,
            ProcessState::Fast(state) => &mut state.processor.advice,
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

    /// Returns the current clock cycle of a process.
    ///
    /// # Panics
    /// Panics if called on a [ProcessState::Noop] state.
    #[inline(always)]
    pub fn clk(&self) -> RowIndex {
        match self {
            ProcessState::Slow(state) => state.system.clk(),
            ProcessState::Fast(state) => state.processor.clk,
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

    /// Returns the current execution context ID.
    ///
    /// # Panics
    /// Panics if called on a [ProcessState::Noop] state.
    #[inline(always)]
    pub fn ctx(&self) -> ContextId {
        match self {
            ProcessState::Slow(state) => state.system.ctx(),
            ProcessState::Fast(state) => state.processor.ctx,
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

    /// Returns the current value of the free memory pointer.
    ///
    /// # Panics
    /// Panics if called on a [ProcessState::Noop] state.
    #[inline(always)]
    pub fn fmp(&self) -> u64 {
        match self {
            ProcessState::Slow(state) => state.system.fmp().as_int(),
            ProcessState::Fast(state) => state.processor.fmp.as_int(),
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }
825
    /// Returns the value located at the specified position on the stack at the current clock cycle.
    ///
    /// This method can access elements beyond the top 16 positions by using the overflow table.
    ///
    /// # Panics
    /// Panics if called on a [ProcessState::Noop] state.
    #[inline(always)]
    pub fn get_stack_item(&self, pos: usize) -> Felt {
        match self {
            ProcessState::Slow(state) => state.stack.get(pos),
            ProcessState::Fast(state) => state.processor.stack_get(pos),
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

    /// Returns a word starting at the specified element index on the stack.
    ///
    /// The word is formed by taking 4 consecutive elements starting from the specified index.
    /// For example, start_idx=0 creates a word from stack elements 0-3, start_idx=1 creates
    /// a word from elements 1-4, etc.
    ///
    /// The words are created in reverse order. For a word starting at index N, stack element
    /// N+3 will be at position 0 of the word, N+2 at position 1, N+1 at position 2, and N
    /// at position 3.
    ///
    /// This method can access elements beyond the top 16 positions by using the overflow table.
    /// Creating a word does not change the state of the stack.
    ///
    /// # Panics
    /// Panics if called on a [ProcessState::Noop] state.
    #[inline(always)]
    pub fn get_stack_word(&self, start_idx: usize) -> Word {
        match self {
            ProcessState::Slow(state) => state.stack.get_word(start_idx),
            ProcessState::Fast(state) => state.processor.stack_get_word(start_idx),
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }
858
859    /// Returns stack state at the current clock cycle. This includes the top 16 items of the
860    /// stack + overflow entries.
861    #[inline(always)]
862    pub fn get_stack_state(&self) -> Vec<Felt> {
863        match self {
864            ProcessState::Slow(state) => state.stack.get_state_at(state.system.clk()),
865            ProcessState::Fast(state) => state.processor.stack().iter().rev().copied().collect(),
866            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
867        }
868    }
869
870    /// Returns the element located at the specified context/address, or None if the address hasn't
871    /// been accessed previously.
872    #[inline(always)]
873    pub fn get_mem_value(&self, ctx: ContextId, addr: u32) -> Option<Felt> {
874        match self {
875            ProcessState::Slow(state) => state.chiplets.memory.get_value(ctx, addr),
876            ProcessState::Fast(state) => state.processor.memory.read_element_impl(ctx, addr),
877            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
878        }
879    }
880
881    /// Returns the batch of elements starting at the specified context/address.
882    ///
883    /// # Errors
884    /// - If the address is not word aligned.
885    #[inline(always)]
886    pub fn get_mem_word(&self, ctx: ContextId, addr: u32) -> Result<Option<Word>, MemoryError> {
887        match self {
888            ProcessState::Slow(state) => state.chiplets.memory.get_word(ctx, addr),
889            ProcessState::Fast(state) => {
890                state.processor.memory.read_word_impl(ctx, addr, None, &())
891            },
892            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
893        }
894    }
895
896    /// Reads (start_addr, end_addr) tuple from the specified elements of the operand stack (
897    /// without modifying the state of the stack), and verifies that memory range is valid.
898    pub fn get_mem_addr_range(
899        &self,
900        start_idx: usize,
901        end_idx: usize,
902    ) -> Result<core::ops::Range<u32>, MemoryError> {
903        let start_addr = self.get_stack_item(start_idx).as_int();
904        let end_addr = self.get_stack_item(end_idx).as_int();
905
906        if start_addr > u32::MAX as u64 {
907            return Err(MemoryError::address_out_of_bounds(start_addr, &()));
908        }
909        if end_addr > u32::MAX as u64 {
910            return Err(MemoryError::address_out_of_bounds(end_addr, &()));
911        }
912
913        if start_addr > end_addr {
914            return Err(MemoryError::InvalidMemoryRange { start_addr, end_addr });
915        }
916
917        Ok(start_addr as u32..end_addr as u32)
918    }
919
920    /// Returns the entire memory state for the specified execution context at the current clock
921    /// cycle.
922    ///
923    /// The state is returned as a vector of (address, value) tuples, and includes addresses which
924    /// have been accessed at least once.
925    #[inline(always)]
926    pub fn get_mem_state(&self, ctx: ContextId) -> Vec<(MemoryAddress, Felt)> {
927        match self {
928            ProcessState::Slow(state) => {
929                state.chiplets.memory.get_state_at(ctx, state.system.clk())
930            },
931            ProcessState::Fast(state) => state.processor.memory.get_memory_state(ctx),
932            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
933        }
934    }
935}
936
/// Creates a [`ProcessState`] view over a mutable [`Process`] reference.
impl<'a> From<&'a mut Process> for ProcessState<'a> {
    fn from(process: &'a mut Process) -> Self {
        // Delegates to `Process::state()`, which constructs the state view.
        process.state()
    }
}
942
943// HELPERS
944// ================================================================================================
945
946/// For errors generated from processing an `ExternalNode`, returns the same error except with
947/// proper error context.
948pub(crate) fn add_error_ctx_to_external_error(
949    result: Result<(), ExecutionError>,
950    err_ctx: impl ErrorContext,
951) -> Result<(), ExecutionError> {
952    match result {
953        Ok(_) => Ok(()),
954        // Add context information to any errors coming from executing an `ExternalNode`
955        Err(err) => match err {
956            ExecutionError::NoMastForestWithProcedure { label, source_file: _, root_digest }
957            | ExecutionError::MalformedMastForestInHost { label, source_file: _, root_digest } => {
958                if label == SourceSpan::UNKNOWN {
959                    let err_with_ctx =
960                        ExecutionError::no_mast_forest_with_procedure(root_digest, &err_ctx);
961                    Err(err_with_ctx)
962                } else {
963                    // If the source span was already populated, just return the error as-is. This
964                    // would occur when a call deeper down the call stack was responsible for the
965                    // error.
966                    Err(err)
967                }
968            },
969
970            _ => {
971                // do nothing
972                Err(err)
973            },
974        },
975    }
976}