// miden_processor/lib.rs

1#![no_std]
2
3#[macro_use]
4extern crate alloc;
5
6#[cfg(feature = "std")]
7extern crate std;
8
9use alloc::{sync::Arc, vec::Vec};
10use core::fmt::{Display, LowerHex};
11
12use miden_air::trace::{
13    CHIPLETS_WIDTH, DECODER_TRACE_WIDTH, MIN_TRACE_LEN, RANGE_CHECK_TRACE_WIDTH, STACK_TRACE_WIDTH,
14    SYS_TRACE_WIDTH,
15};
16pub use miden_air::{ExecutionOptions, ExecutionOptionsError, RowIndex};
17pub use miden_core::{
18    AssemblyOp, EMPTY_WORD, Felt, Kernel, ONE, Operation, Program, ProgramInfo, QuadExtension,
19    StackInputs, StackOutputs, Word, ZERO,
20    crypto::merkle::SMT_DEPTH,
21    errors::InputError,
22    mast::{MastForest, MastNode, MastNodeId},
23    sys_events::SystemEvent,
24    utils::{DeserializationError, collections::KvMap},
25};
26use miden_core::{
27    Decorator, DecoratorIterator, FieldElement, WORD_SIZE,
28    mast::{
29        BasicBlockNode, CallNode, DynNode, ExternalNode, JoinNode, LoopNode, OP_GROUP_SIZE,
30        OpBatch, SplitNode,
31    },
32};
33use miden_debug_types::SourceSpan;
34pub use winter_prover::matrix::ColMatrix;
35
36pub(crate) mod continuation_stack;
37
38pub mod fast;
39use fast::FastProcessState;
40
41mod operations;
42
43mod system;
44use system::System;
45pub use system::{ContextId, FMP_MIN, SYSCALL_FMP_MIN};
46
47mod decoder;
48use decoder::Decoder;
49
50mod stack;
51use stack::Stack;
52
53mod range;
54use range::RangeChecker;
55
56mod host;
57pub use host::{
58    AdviceMutation, AsyncHost, BaseHost, FutureMaybeSend, MastForestStore, MemMastForestStore,
59    SyncHost,
60    advice::{AdviceError, AdviceInputs, AdviceProvider},
61    default::{DefaultDebugHandler, DefaultHost, HostLibrary},
62    handlers::{DebugHandler, EventError, EventHandler, EventHandlerRegistry},
63};
64
65mod chiplets;
66use chiplets::Chiplets;
67pub use chiplets::MemoryError;
68
69mod trace;
70use trace::TraceFragment;
71pub use trace::{ChipletsLengths, ExecutionTrace, NUM_RAND_ROWS, TraceLenSummary};
72
73mod errors;
74pub use errors::{ErrorContext, ErrorContextImpl, ExecutionError};
75
76pub mod utils;
77
78#[cfg(test)]
79mod tests;
80
81mod debug;
82pub use debug::{AsmOpInfo, VmState, VmStateIterator};
83
84// RE-EXPORTS
85// ================================================================================================
86
/// Re-exports of the math primitives (field elements and FFT routines) used by the processor.
pub mod math {
    pub use miden_core::{Felt, FieldElement, StarkField};
    pub use winter_prover::math::fft;
}
91
/// Re-exports of the hash functions, Merkle data structures, and random coins used by the
/// processor.
pub mod crypto {
    pub use miden_core::crypto::{
        hash::{Blake3_192, Blake3_256, ElementHasher, Hasher, Rpo256, Rpx256},
        merkle::{
            MerkleError, MerklePath, MerkleStore, MerkleTree, NodeIndex, PartialMerkleTree,
            SimpleSmt,
        },
        random::{RandomCoin, RpoRandomCoin, RpxRandomCoin, WinterRandomCoin},
    };
}
102
103// TYPE ALIASES
104// ================================================================================================
105
/// A newtype wrapper around a raw `u32` VM memory address.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct MemoryAddress(u32);
108
109impl From<u32> for MemoryAddress {
110    fn from(addr: u32) -> Self {
111        MemoryAddress(addr)
112    }
113}
114
115impl From<MemoryAddress> for u32 {
116    fn from(value: MemoryAddress) -> Self {
117        value.0
118    }
119}
120
121impl Display for MemoryAddress {
122    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
123        Display::fmt(&self.0, f)
124    }
125}
126
127impl LowerHex for MemoryAddress {
128    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
129        LowerHex::fmt(&self.0, f)
130    }
131}
132
133impl core::ops::Add<MemoryAddress> for MemoryAddress {
134    type Output = Self;
135
136    fn add(self, rhs: MemoryAddress) -> Self::Output {
137        MemoryAddress(self.0 + rhs.0)
138    }
139}
140
141impl core::ops::Add<u32> for MemoryAddress {
142    type Output = Self;
143
144    fn add(self, rhs: u32) -> Self::Output {
145        MemoryAddress(self.0 + rhs)
146    }
147}
148
/// Columns of the system component's execution trace.
type SysTrace = [Vec<Felt>; SYS_TRACE_WIDTH];
150
/// Columns of the decoder's execution trace, together with the builder for its auxiliary
/// (multiset-check) columns.
pub struct DecoderTrace {
    trace: [Vec<Felt>; DECODER_TRACE_WIDTH],
    aux_builder: decoder::AuxTraceBuilder,
}
155
/// Columns of the stack component's execution trace.
pub struct StackTrace {
    trace: [Vec<Felt>; STACK_TRACE_WIDTH],
}
159
/// Columns of the range checker's execution trace, together with the builder for its auxiliary
/// columns.
pub struct RangeCheckTrace {
    trace: [Vec<Felt>; RANGE_CHECK_TRACE_WIDTH],
    aux_builder: range::AuxTraceBuilder,
}
164
/// Columns of the chiplets' execution trace, together with the builder for their auxiliary
/// columns.
pub struct ChipletsTrace {
    trace: [Vec<Felt>; CHIPLETS_WIDTH],
    aux_builder: chiplets::AuxTraceBuilder,
}
169
170// EXECUTORS
171// ================================================================================================
172
173/// Returns an execution trace resulting from executing the provided program against the provided
174/// inputs.
175///
176/// The `host` parameter is used to provide the external environment to the program being executed,
177/// such as access to the advice provider and libraries that the program depends on.
178#[tracing::instrument("execute_program", skip_all)]
179pub fn execute(
180    program: &Program,
181    stack_inputs: StackInputs,
182    advice_inputs: AdviceInputs,
183    host: &mut impl SyncHost,
184    options: ExecutionOptions,
185) -> Result<ExecutionTrace, ExecutionError> {
186    let mut process = Process::new(program.kernel().clone(), stack_inputs, advice_inputs, options);
187    let stack_outputs = process.execute(program, host)?;
188    let trace = ExecutionTrace::new(process, stack_outputs);
189    assert_eq!(&program.hash(), trace.program_hash(), "inconsistent program hash");
190    Ok(trace)
191}
192
193/// Returns an iterator which allows callers to step through the execution and inspect VM state at
194/// each execution step.
195pub fn execute_iter(
196    program: &Program,
197    stack_inputs: StackInputs,
198    advice_inputs: AdviceInputs,
199    host: &mut impl SyncHost,
200) -> VmStateIterator {
201    let mut process = Process::new_debug(program.kernel().clone(), stack_inputs, advice_inputs);
202    let result = process.execute(program, host);
203    if result.is_ok() {
204        assert_eq!(
205            program.hash(),
206            process.decoder.program_hash().into(),
207            "inconsistent program hash"
208        );
209    }
210    VmStateIterator::new(process, result)
211}
212
213// PROCESS
214// ================================================================================================
215
/// A [Process] is the underlying execution engine for a Miden [Program].
///
/// Typically, you do not need to worry about, or use [Process] directly, instead you should prefer
/// to use either [execute] or [execute_iter], which also handle setting up the process state,
/// inputs, as well as compute the [ExecutionTrace] for the program.
///
/// However, for situations in which you want finer-grained control over those steps, you will need
/// to construct an instance of [Process] using [Process::new], invoke [Process::execute], and then
/// get the execution trace using [ExecutionTrace::new] using the outputs produced by execution.
#[cfg(not(any(test, feature = "testing")))]
pub struct Process {
    /// Advice provider, seeded from the [AdviceInputs] supplied at construction.
    advice: AdviceProvider,
    /// System state of the VM (clock cycle, execution context, free-memory pointer).
    system: System,
    /// Decoder component; accumulates the program hash and, in debug mode, assembly-op info.
    decoder: Decoder,
    /// Operand stack of the VM.
    stack: Stack,
    /// Range checker component.
    range: RangeChecker,
    /// Chiplets component; includes the kernel ROM used to validate syscall targets.
    chiplets: Chiplets,
    /// Maximum number of cycles the process may run for, from [ExecutionOptions].
    max_cycles: u32,
    /// Whether trace decorators are forwarded to the host, from [ExecutionOptions].
    enable_tracing: bool,
}
236
/// Testing variant of [Process] with all fields exposed publicly so tests can inspect and
/// manipulate the internal state directly.
#[cfg(any(test, feature = "testing"))]
pub struct Process {
    /// Advice provider, seeded from the [AdviceInputs] supplied at construction.
    pub advice: AdviceProvider,
    /// System state of the VM (clock cycle, execution context, free-memory pointer).
    pub system: System,
    /// Decoder component; accumulates the program hash and, in debug mode, assembly-op info.
    pub decoder: Decoder,
    /// Operand stack of the VM.
    pub stack: Stack,
    /// Range checker component.
    pub range: RangeChecker,
    /// Chiplets component; includes the kernel ROM used to validate syscall targets.
    pub chiplets: Chiplets,
    /// Maximum number of cycles the process may run for, from [ExecutionOptions].
    pub max_cycles: u32,
    /// Whether trace decorators are forwarded to the host, from [ExecutionOptions].
    pub enable_tracing: bool,
}
248
impl Process {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------
    /// Creates a new process with the provided inputs.
    pub fn new(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        Self::initialize(kernel, stack_inputs, advice_inputs, execution_options)
    }

    /// Creates a new process with provided inputs and debug options enabled.
    pub fn new_debug(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
    ) -> Self {
        Self::initialize(
            kernel,
            stack_inputs,
            advice_inputs,
            ExecutionOptions::default().with_tracing().with_debugging(true),
        )
    }

    /// Builds all process components (advice provider, system, decoder, stack, range checker,
    /// and chiplets) from the provided inputs and execution options.
    fn initialize(
        kernel: Kernel,
        stack: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        let in_debug_mode = execution_options.enable_debugging();
        Self {
            advice: advice_inputs.into(),
            system: System::new(execution_options.expected_cycles() as usize),
            decoder: Decoder::new(in_debug_mode),
            stack: Stack::new(&stack, execution_options.expected_cycles() as usize, in_debug_mode),
            range: RangeChecker::new(),
            chiplets: Chiplets::new(kernel),
            max_cycles: execution_options.max_cycles(),
            enable_tracing: execution_options.enable_tracing(),
        }
    }

    // PROGRAM EXECUTOR
    // --------------------------------------------------------------------------------------------

    /// Executes the provided [`Program`] in this process.
    ///
    /// Returns an error if this process has already executed a program (i.e., the clock is not
    /// at 0), or if execution of the program fails.
    pub fn execute(
        &mut self,
        program: &Program,
        host: &mut impl SyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        if self.system.clk() != 0 {
            return Err(ExecutionError::ProgramAlreadyExecuted);
        }

        // make the program's advice map available to the advice provider before execution begins
        self.advice
            .extend_map(program.mast_forest().advice_map())
            .map_err(|err| ExecutionError::advice_error(err, RowIndex::from(0), &()))?;

        self.execute_mast_node(program.entrypoint(), &program.mast_forest().clone(), host)?;

        self.stack.build_stack_outputs()
    }

    // NODE EXECUTORS
    // --------------------------------------------------------------------------------------------

    /// Executes the MAST node with the specified ID in the provided forest, dispatching to the
    /// executor matching the node's type.
    ///
    /// Decorators attached to the node are executed before entering and after exiting it.
    fn execute_mast_node(
        &mut self,
        node_id: MastNodeId,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let node = program
            .get_node_by_id(node_id)
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id })?;

        for &decorator_id in node.before_enter() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        match node {
            MastNode::Block(node) => self.execute_basic_block_node(node, program, host)?,
            MastNode::Join(node) => self.execute_join_node(node, program, host)?,
            MastNode::Split(node) => self.execute_split_node(node, program, host)?,
            MastNode::Loop(node) => self.execute_loop_node(node, program, host)?,
            MastNode::Call(node) => {
                let err_ctx = err_ctx!(program, node, host);
                add_error_ctx_to_external_error(
                    self.execute_call_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::Dyn(node) => {
                let err_ctx = err_ctx!(program, node, host);
                add_error_ctx_to_external_error(
                    self.execute_dyn_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::External(external_node) => {
                // resolve the external reference to a procedure root in a host-provided forest
                let (root_id, mast_forest) = self.resolve_external_node(external_node, host)?;

                self.execute_mast_node(root_id, &mast_forest, host)?;
            },
        }

        for &decorator_id in node.after_exit() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the specified [JoinNode].
    #[inline(always)]
    fn execute_join_node(
        &mut self,
        node: &JoinNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_join_node(node, program, host)?;

        // execute first and then second child of the join block
        self.execute_mast_node(node.first(), program, host)?;
        self.execute_mast_node(node.second(), program, host)?;

        self.end_join_node(node, program, host)
    }

    /// Executes the specified [SplitNode].
    #[inline(always)]
    fn execute_split_node(
        &mut self,
        node: &SplitNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // start the SPLIT block; this also pops the stack and returns the popped element
        let condition = self.start_split_node(node, program, host)?;

        // execute either the true or the false branch of the split block based on the condition
        if condition == ONE {
            self.execute_mast_node(node.on_true(), program, host)?;
        } else if condition == ZERO {
            self.execute_mast_node(node.on_false(), program, host)?;
        } else {
            let err_ctx = err_ctx!(program, node, host);
            return Err(ExecutionError::not_binary_value_if(condition, &err_ctx));
        }

        self.end_split_node(node, program, host)
    }

    /// Executes the specified [LoopNode].
    #[inline(always)]
    fn execute_loop_node(
        &mut self,
        node: &LoopNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // start the LOOP block; this also pops the stack and returns the popped element
        let condition = self.start_loop_node(node, program, host)?;

        // if the top of the stack is ONE, execute the loop body; otherwise skip the loop body
        if condition == ONE {
            // execute the loop body at least once
            self.execute_mast_node(node.body(), program, host)?;

            // keep executing the loop body until the condition on the top of the stack is no
            // longer ONE; each iteration of the loop is preceded by executing REPEAT operation
            // which drops the condition from the stack
            while self.stack.peek() == ONE {
                self.decoder.repeat();
                self.execute_op(Operation::Drop, program, host)?;
                self.execute_mast_node(node.body(), program, host)?;
            }

            if self.stack.peek() != ZERO {
                let err_ctx = err_ctx!(program, node, host);
                return Err(ExecutionError::not_binary_value_loop(self.stack.peek(), &err_ctx));
            }

            // end the LOOP block and drop the condition from the stack
            self.end_loop_node(node, true, program, host)
        } else if condition == ZERO {
            // end the LOOP block, but don't drop the condition from the stack because it was
            // already dropped when we started the LOOP block
            self.end_loop_node(node, false, program, host)
        } else {
            let err_ctx = err_ctx!(program, node, host);
            Err(ExecutionError::not_binary_value_loop(condition, &err_ctx))
        }
    }

    /// Executes the specified [CallNode].
    #[inline(always)]
    fn execute_call_node(
        &mut self,
        call_node: &CallNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // call or syscall are not allowed inside a syscall
        if self.system.in_syscall() {
            let instruction = if call_node.is_syscall() { "syscall" } else { "call" };
            return Err(ExecutionError::CallInSyscall(instruction));
        }

        // if this is a syscall, make sure the call target exists in the kernel
        if call_node.is_syscall() {
            let callee = program.get_node_by_id(call_node.callee()).ok_or_else(|| {
                ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() }
            })?;
            let err_ctx = err_ctx!(program, call_node, host);
            self.chiplets.kernel_rom.access_proc(callee.digest(), &err_ctx)?;
        }
        let err_ctx = err_ctx!(program, call_node, host);

        self.start_call_node(call_node, program, host)?;
        self.execute_mast_node(call_node.callee(), program, host)?;
        self.end_call_node(call_node, program, host, &err_ctx)
    }

    /// Executes the specified [miden_core::mast::DynNode].
    ///
    /// The MAST root of the callee is assumed to be at the top of the stack, and the callee is
    /// expected to be either in the current `program` or in the host.
    #[inline(always)]
    fn execute_dyn_node(
        &mut self,
        node: &DynNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // dyn calls are not allowed inside a syscall
        if node.is_dyncall() && self.system.in_syscall() {
            return Err(ExecutionError::CallInSyscall("dyncall"));
        }

        let err_ctx = err_ctx!(program, node, host);

        let callee_hash = if node.is_dyncall() {
            self.start_dyncall_node(node, &err_ctx)?
        } else {
            self.start_dyn_node(node, program, host, &err_ctx)?
        };

        // if the callee is not in the program's MAST forest, try to find a MAST forest for it in
        // the host (corresponding to an external library loaded in the host); if none are
        // found, return an error.
        match program.find_procedure_root(callee_hash) {
            Some(callee_id) => self.execute_mast_node(callee_id, program, host)?,
            None => {
                let mast_forest = host
                    .get_mast_forest(&callee_hash)
                    .ok_or_else(|| ExecutionError::dynamic_node_not_found(callee_hash, &err_ctx))?;

                // We limit the parts of the program that can be called externally to procedure
                // roots, even though MAST doesn't have that restriction.
                let root_id = mast_forest
                    .find_procedure_root(callee_hash)
                    .ok_or(ExecutionError::malfored_mast_forest_in_host(callee_hash, &()))?;

                // Merge the advice map of this forest into the advice provider.
                // Note that the map may be merged multiple times if a different procedure from the
                // same forest is called.
                // For now, only compiled libraries contain non-empty advice maps, so for most
                // cases, this call will be cheap.
                self.advice
                    .extend_map(mast_forest.advice_map())
                    .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

                self.execute_mast_node(root_id, &mast_forest, host)?
            },
        }

        if node.is_dyncall() {
            self.end_dyncall_node(node, program, host, &err_ctx)
        } else {
            self.end_dyn_node(node, program, host)
        }
    }

    /// Executes the specified [BasicBlockNode].
    #[inline(always)]
    fn execute_basic_block_node(
        &mut self,
        basic_block: &BasicBlockNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_basic_block_node(basic_block, program, host)?;

        let mut op_offset = 0;
        let mut decorator_ids = basic_block.decorator_iter();

        // execute the first operation batch
        self.execute_op_batch(
            basic_block,
            &basic_block.op_batches()[0],
            &mut decorator_ids,
            op_offset,
            program,
            host,
        )?;
        op_offset += basic_block.op_batches()[0].ops().len();

        // if the span contains more operation batches, execute them. each additional batch is
        // preceded by a RESPAN operation; executing RESPAN operation does not change the state
        // of the stack
        for op_batch in basic_block.op_batches().iter().skip(1) {
            self.respan(op_batch);
            self.execute_op(Operation::Noop, program, host)?;
            self.execute_op_batch(
                basic_block,
                op_batch,
                &mut decorator_ids,
                op_offset,
                program,
                host,
            )?;
            op_offset += op_batch.ops().len();
        }

        self.end_basic_block_node(basic_block, program, host)?;

        // execute any decorators which have not been executed during span ops execution; this
        // can happen for decorators appearing after all operations in a block. these decorators
        // are executed after SPAN block is closed to make sure the VM clock cycle advances beyond
        // the last clock cycle of the SPAN block ops.
        for &decorator_id in decorator_ids {
            let decorator = program
                .get_decorator_by_id(decorator_id)
                .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
            self.execute_decorator(decorator, host)?;
        }

        Ok(())
    }

    /// Executes all operations in an [OpBatch]. This also ensures that all alignment rules are
    /// satisfied by executing NOOPs as needed. Specifically:
    /// - If an operation group ends with an operation carrying an immediate value, a NOOP is
    ///   executed after it.
    /// - If the number of groups in a batch is not a power of 2, NOOPs are executed (one per group)
    ///   to bring it up to the next power of two (e.g., 3 -> 4, 5 -> 8).
    #[inline(always)]
    fn execute_op_batch(
        &mut self,
        basic_block: &BasicBlockNode,
        batch: &OpBatch,
        decorators: &mut DecoratorIterator,
        op_offset: usize,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let op_counts = batch.op_counts();
        let mut op_idx = 0;
        let mut group_idx = 0;
        let mut next_group_idx = 1;

        // round up the number of groups to be processed to the next power of two; we do this
        // because the processor requires the number of groups to be either 1, 2, 4, or 8; if
        // the actual number of groups is smaller, we'll pad the batch with NOOPs at the end
        let num_batch_groups = batch.num_groups().next_power_of_two();

        // execute operations in the batch one by one
        for (i, &op) in batch.ops().iter().enumerate() {
            // execute any decorators attached at this operation's absolute (batch-offset) index
            while let Some(&decorator_id) = decorators.next_filtered(i + op_offset) {
                let decorator = program
                    .get_decorator_by_id(decorator_id)
                    .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
                self.execute_decorator(decorator, host)?;
            }

            // decode and execute the operation
            let err_ctx = err_ctx!(program, basic_block, host, i + op_offset);
            self.decoder.execute_user_op(op, op_idx);
            self.execute_op_with_error_ctx(op, program, host, &err_ctx)?;

            // if the operation carries an immediate value, the value is stored at the next group
            // pointer; so, we advance the pointer to the following group
            let has_imm = op.imm_value().is_some();
            if has_imm {
                next_group_idx += 1;
            }

            // determine if we've executed all non-decorator operations in a group
            if op_idx == op_counts[group_idx] - 1 {
                // if we are at the end of the group, first check if the operation carries an
                // immediate value
                if has_imm {
                    // an operation with an immediate value cannot be the last operation in a group
                    // so, we need to execute a NOOP after it. the assert also makes sure that there
                    // is enough room in the group to execute a NOOP (if there isn't, there is a
                    // bug somewhere in the assembler)
                    debug_assert!(op_idx < OP_GROUP_SIZE - 1, "invalid op index");
                    self.decoder.execute_user_op(Operation::Noop, op_idx + 1);
                    self.execute_op(Operation::Noop, program, host)?;
                }

                // then, move to the next group and reset operation index
                group_idx = next_group_idx;
                next_group_idx += 1;
                op_idx = 0;

                // if we haven't reached the end of the batch yet, set up the decoder for
                // decoding the next operation group
                if group_idx < num_batch_groups {
                    self.decoder.start_op_group(batch.groups()[group_idx]);
                }
            } else {
                // if we are not at the end of the group, just increment the operation index
                op_idx += 1;
            }
        }

        // make sure we execute the required number of operation groups; this would happen when
        // the actual number of operation groups was not a power of two
        for group_idx in group_idx..num_batch_groups {
            self.decoder.execute_user_op(Operation::Noop, 0);
            self.execute_op(Operation::Noop, program, host)?;

            // if we are not at the last group yet, set up the decoder for decoding the next
            // operation groups. the groups we are processing are just NOOPs - so, the op group
            // value is ZERO
            if group_idx < num_batch_groups - 1 {
                self.decoder.start_op_group(ZERO);
            }
        }

        Ok(())
    }

    /// Executes the specified decorator.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                // debug decorators are surfaced to the host only when debug mode is enabled
                if self.decoder.in_debug_mode() {
                    let process = &mut self.state();
                    host.on_debug(process, options)?;
                }
            },
            Decorator::AsmOp(assembly_op) => {
                if self.decoder.in_debug_mode() {
                    self.decoder.append_asmop(self.system.clk(), assembly_op.clone());
                }
            },
            Decorator::Trace(id) => {
                // trace decorators are surfaced to the host only when tracing is enabled
                if self.enable_tracing {
                    let process = &mut self.state();
                    host.on_trace(process, *id)?;
                }
            },
        };
        Ok(())
    }

    /// Resolves an external node reference to a procedure root using the [`MastForest`] store in
    /// the provided host.
    ///
    /// The [`MastForest`] for the procedure is cached to avoid additional queries to the host.
    fn resolve_external_node(
        &mut self,
        external_node: &ExternalNode,
        host: &impl SyncHost,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError> {
        let node_digest = external_node.digest();

        let mast_forest = host
            .get_mast_forest(&node_digest)
            .ok_or(ExecutionError::no_mast_forest_with_procedure(node_digest, &()))?;

        // We limit the parts of the program that can be called externally to procedure
        // roots, even though MAST doesn't have that restriction.
        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, &()))?;

        // if the node that we got by looking up an external reference is also an External
        // node, we are about to enter into an infinite loop - so, return an error
        if mast_forest[root_id].is_external() {
            return Err(ExecutionError::CircularExternalNode(node_digest));
        }

        // Merge the advice map of this forest into the advice provider.
        // Note that the map may be merged multiple times if a different procedure from the same
        // forest is called.
        // For now, only compiled libraries contain non-empty advice maps, so for most cases,
        // this call will be cheap.
        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

        Ok((root_id, mast_forest))
    }

    // PUBLIC ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns a reference to the kernel against which this process executes programs.
    pub const fn kernel(&self) -> &Kernel {
        self.chiplets.kernel_rom.kernel()
    }

    /// Consumes the process and returns its constituent components.
    pub fn into_parts(self) -> (System, Decoder, Stack, RangeChecker, Chiplets) {
        (self.system, self.decoder, self.stack, self.range, self.chiplets)
    }
}
768
/// Borrowed view into the state of the (slow) [Process], used to build a [ProcessState].
#[derive(Debug)]
pub struct SlowProcessState<'a> {
    // mutable so that host callbacks (e.g. debug/trace handlers) can modify the advice provider
    advice: &'a mut AdviceProvider,
    system: &'a System,
    stack: &'a Stack,
    chiplets: &'a Chiplets,
}
776
777// PROCESS STATE
778// ================================================================================================
779
/// A view into the state of a running processor, abstracting over the slow ([Process]) and fast
/// ([fast::FastProcessState]) execution paths.
#[derive(Debug)]
pub enum ProcessState<'a> {
    Slow(SlowProcessState<'a>),
    Fast(FastProcessState<'a>),
}
785
impl Process {
    /// Returns a [ProcessState] view over this process, granting mutable access to the advice
    /// provider and read-only access to the system, stack, and chiplets state.
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Slow(SlowProcessState {
            advice: &mut self.advice,
            system: &self.system,
            stack: &self.stack,
            chiplets: &self.chiplets,
        })
    }
}
797
798impl<'a> ProcessState<'a> {
799    /// Returns a reference to the advice provider.
800    #[inline(always)]
801    pub fn advice_provider(&self) -> &AdviceProvider {
802        match self {
803            ProcessState::Slow(state) => state.advice,
804            ProcessState::Fast(state) => &state.processor.advice,
805        }
806    }
807
808    /// Returns a mutable reference to the advice provider.
809    #[inline(always)]
810    pub fn advice_provider_mut(&mut self) -> &mut AdviceProvider {
811        match self {
812            ProcessState::Slow(state) => state.advice,
813            ProcessState::Fast(state) => &mut state.processor.advice,
814        }
815    }
816
817    /// Returns the current clock cycle of a process.
818    #[inline(always)]
819    pub fn clk(&self) -> RowIndex {
820        match self {
821            ProcessState::Slow(state) => state.system.clk(),
822            ProcessState::Fast(state) => state.processor.clk,
823        }
824    }
825
826    /// Returns the current execution context ID.
827    #[inline(always)]
828    pub fn ctx(&self) -> ContextId {
829        match self {
830            ProcessState::Slow(state) => state.system.ctx(),
831            ProcessState::Fast(state) => state.processor.ctx,
832        }
833    }
834
835    /// Returns the current value of the free memory pointer.
836    #[inline(always)]
837    pub fn fmp(&self) -> u64 {
838        match self {
839            ProcessState::Slow(state) => state.system.fmp().as_int(),
840            ProcessState::Fast(state) => state.processor.fmp.as_int(),
841        }
842    }
843
844    /// Returns the value located at the specified position on the stack at the current clock cycle.
845    #[inline(always)]
846    pub fn get_stack_item(&self, pos: usize) -> Felt {
847        match self {
848            ProcessState::Slow(state) => state.stack.get(pos),
849            ProcessState::Fast(state) => state.processor.stack_get(pos),
850        }
851    }
852
853    /// Returns a word located at the specified word index on the stack.
854    ///
855    /// Specifically, word 0 is defined by the first 4 elements of the stack, word 1 is defined
856    /// by the next 4 elements etc. Since the top of the stack contains 4 word, the highest valid
857    /// word index is 3.
858    ///
859    /// The words are created in reverse order. For example, for word 0 the top element of the
860    /// stack will be at the last position in the word.
861    ///
862    /// Creating a word does not change the state of the stack.
863    #[inline(always)]
864    pub fn get_stack_word(&self, word_idx: usize) -> Word {
865        match self {
866            ProcessState::Slow(state) => state.stack.get_word(word_idx),
867            ProcessState::Fast(state) => state.processor.stack_get_word(word_idx * WORD_SIZE),
868        }
869    }
870
871    /// Returns stack state at the current clock cycle. This includes the top 16 items of the
872    /// stack + overflow entries.
873    #[inline(always)]
874    pub fn get_stack_state(&self) -> Vec<Felt> {
875        match self {
876            ProcessState::Slow(state) => state.stack.get_state_at(state.system.clk()),
877            ProcessState::Fast(state) => state.processor.stack().iter().rev().copied().collect(),
878        }
879    }
880
881    /// Returns the element located at the specified context/address, or None if the address hasn't
882    /// been accessed previously.
883    #[inline(always)]
884    pub fn get_mem_value(&self, ctx: ContextId, addr: u32) -> Option<Felt> {
885        match self {
886            ProcessState::Slow(state) => state.chiplets.memory.get_value(ctx, addr),
887            ProcessState::Fast(state) => state.processor.memory.read_element_impl(ctx, addr),
888        }
889    }
890
891    /// Returns the batch of elements starting at the specified context/address.
892    ///
893    /// # Errors
894    /// - If the address is not word aligned.
895    #[inline(always)]
896    pub fn get_mem_word(&self, ctx: ContextId, addr: u32) -> Result<Option<Word>, MemoryError> {
897        match self {
898            ProcessState::Slow(state) => state.chiplets.memory.get_word(ctx, addr),
899            ProcessState::Fast(state) => {
900                state.processor.memory.read_word_impl(ctx, addr, None, &())
901            },
902        }
903    }
904
905    /// Returns the entire memory state for the specified execution context at the current clock
906    /// cycle.
907    ///
908    /// The state is returned as a vector of (address, value) tuples, and includes addresses which
909    /// have been accessed at least once.
910    #[inline(always)]
911    pub fn get_mem_state(&self, ctx: ContextId) -> Vec<(MemoryAddress, Felt)> {
912        match self {
913            ProcessState::Slow(state) => {
914                state.chiplets.memory.get_state_at(ctx, state.system.clk())
915            },
916            ProcessState::Fast(state) => state.processor.memory.get_memory_state(ctx),
917        }
918    }
919}
920
impl<'a> From<&'a mut Process> for ProcessState<'a> {
    /// Creates a [ProcessState] view over the given process by delegating to [Process::state],
    /// which always produces the [ProcessState::Slow] variant.
    fn from(process: &'a mut Process) -> Self {
        process.state()
    }
}
926
927// HELPERS
928// ================================================================================================
929
930/// For errors generated from processing an `ExternalNode`, returns the same error except with
931/// proper error context.
932pub(crate) fn add_error_ctx_to_external_error(
933    result: Result<(), ExecutionError>,
934    err_ctx: impl ErrorContext,
935) -> Result<(), ExecutionError> {
936    match result {
937        Ok(_) => Ok(()),
938        // Add context information to any errors coming from executing an `ExternalNode`
939        Err(err) => match err {
940            ExecutionError::NoMastForestWithProcedure { label, source_file: _, root_digest }
941            | ExecutionError::MalformedMastForestInHost { label, source_file: _, root_digest } => {
942                if label == SourceSpan::UNKNOWN {
943                    let err_with_ctx =
944                        ExecutionError::no_mast_forest_with_procedure(root_digest, &err_ctx);
945                    Err(err_with_ctx)
946                } else {
947                    // If the source span was already populated, just return the error as-is. This
948                    // would occur when a call deeper down the call stack was responsible for the
949                    // error.
950                    Err(err)
951                }
952            },
953
954            _ => {
955                // do nothing
956                Err(err)
957            },
958        },
959    }
960}