miden_processor/
lib.rs

1#![no_std]
2
3#[macro_use]
4extern crate alloc;
5
6#[cfg(feature = "std")]
7extern crate std;
8
9use alloc::{sync::Arc, vec::Vec};
10use core::fmt::{Display, LowerHex};
11
12use miden_air::trace::{
13    CHIPLETS_WIDTH, DECODER_TRACE_WIDTH, MIN_TRACE_LEN, RANGE_CHECK_TRACE_WIDTH, STACK_TRACE_WIDTH,
14    SYS_TRACE_WIDTH,
15};
16pub use miden_air::{ExecutionOptions, ExecutionOptionsError, RowIndex};
17pub use miden_core::{
18    AssemblyOp, EMPTY_WORD, Felt, Kernel, ONE, Operation, Program, ProgramInfo, QuadExtension,
19    StackInputs, StackOutputs, Word, ZERO,
20    crypto::merkle::SMT_DEPTH,
21    errors::InputError,
22    mast::{MastForest, MastNode, MastNodeId},
23    sys_events::SystemEvent,
24    utils::{DeserializationError, collections::KvMap},
25};
26use miden_core::{
27    Decorator, DecoratorIterator, FieldElement, WORD_SIZE,
28    mast::{
29        BasicBlockNode, CallNode, DynNode, JoinNode, LoopNode, OP_GROUP_SIZE, OpBatch, SplitNode,
30    },
31};
32use miden_debug_types::{DefaultSourceManager, SourceManager, SourceSpan};
33use utils::resolve_external_node;
34pub use winter_prover::matrix::ColMatrix;
35
36pub mod fast;
37use fast::FastProcessState;
38
39mod operations;
40
41mod system;
42use system::System;
43pub use system::{ContextId, FMP_MIN, SYSCALL_FMP_MIN};
44
45mod decoder;
46use decoder::Decoder;
47
48mod stack;
49use stack::Stack;
50
51mod range;
52use range::RangeChecker;
53
54mod host;
55pub use host::{
56    AsyncHost, BaseHost, DefaultHost, MastForestStore, MemMastForestStore, SyncHost,
57    advice::{AdviceError, AdviceInputs, AdviceProvider},
58};
59
60mod chiplets;
61use chiplets::Chiplets;
62pub use chiplets::MemoryError;
63
64mod trace;
65use trace::TraceFragment;
66pub use trace::{ChipletsLengths, ExecutionTrace, NUM_RAND_ROWS, TraceLenSummary};
67
68mod errors;
69pub use errors::{ErrorContext, ExecutionError};
70
71pub mod utils;
72
73#[cfg(test)]
74mod tests;
75
76mod debug;
77pub use debug::{AsmOpInfo, VmState, VmStateIterator};
78
79// RE-EXPORTS
80// ================================================================================================
81
/// Re-exports of the math primitives (field elements and FFT utilities) used by the processor.
pub mod math {
    pub use miden_core::{Felt, FieldElement, StarkField};
    pub use winter_prover::math::fft;
}
86
/// Re-exports of the cryptographic primitives (hashers, Merkle structures, random coins) used by
/// the processor.
pub mod crypto {
    pub use miden_core::crypto::{
        hash::{Blake3_192, Blake3_256, ElementHasher, Hasher, Rpo256, Rpx256},
        merkle::{
            MerkleError, MerklePath, MerkleStore, MerkleTree, NodeIndex, PartialMerkleTree,
            SimpleSmt,
        },
        random::{RandomCoin, RpoRandomCoin, RpxRandomCoin, WinterRandomCoin},
    };
}
97
// TYPE ALIASES AND HELPER TYPES
// ================================================================================================
100
/// A newtype wrapper around a `u32` VM memory address.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct MemoryAddress(u32);
103
104impl From<u32> for MemoryAddress {
105    fn from(addr: u32) -> Self {
106        MemoryAddress(addr)
107    }
108}
109
110impl From<MemoryAddress> for u32 {
111    fn from(value: MemoryAddress) -> Self {
112        value.0
113    }
114}
115
116impl Display for MemoryAddress {
117    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
118        Display::fmt(&self.0, f)
119    }
120}
121
122impl LowerHex for MemoryAddress {
123    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
124        LowerHex::fmt(&self.0, f)
125    }
126}
127
128impl core::ops::Add<MemoryAddress> for MemoryAddress {
129    type Output = Self;
130
131    fn add(self, rhs: MemoryAddress) -> Self::Output {
132        MemoryAddress(self.0 + rhs.0)
133    }
134}
135
136impl core::ops::Add<u32> for MemoryAddress {
137    type Output = Self;
138
139    fn add(self, rhs: u32) -> Self::Output {
140        MemoryAddress(self.0 + rhs)
141    }
142}
143
144type SysTrace = [Vec<Felt>; SYS_TRACE_WIDTH];
145
/// Columns of the decoder component of the execution trace, together with the builder used to
/// construct the decoder's auxiliary trace columns.
pub struct DecoderTrace {
    trace: [Vec<Felt>; DECODER_TRACE_WIDTH],
    aux_builder: decoder::AuxTraceBuilder,
}
150
/// Columns of the stack component of the execution trace.
pub struct StackTrace {
    trace: [Vec<Felt>; STACK_TRACE_WIDTH],
}
154
/// Columns of the range-checker component of the execution trace, together with the builder used
/// to construct the range checker's auxiliary trace columns.
pub struct RangeCheckTrace {
    trace: [Vec<Felt>; RANGE_CHECK_TRACE_WIDTH],
    aux_builder: range::AuxTraceBuilder,
}
159
/// Columns of the chiplets component of the execution trace, together with the builder used to
/// construct the chiplets' auxiliary trace columns.
pub struct ChipletsTrace {
    trace: [Vec<Felt>; CHIPLETS_WIDTH],
    aux_builder: chiplets::AuxTraceBuilder,
}
164
165// EXECUTORS
166// ================================================================================================
167
168/// Returns an execution trace resulting from executing the provided program against the provided
169/// inputs.
170///
171/// The `host` parameter is used to provide the external environment to the program being executed,
172/// such as access to the advice provider and libraries that the program depends on.
173#[tracing::instrument("execute_program", skip_all)]
174pub fn execute(
175    program: &Program,
176    stack_inputs: StackInputs,
177    advice_inputs: AdviceInputs,
178    host: &mut impl SyncHost,
179    options: ExecutionOptions,
180    source_manager: Arc<dyn SourceManager>,
181) -> Result<ExecutionTrace, ExecutionError> {
182    let mut process = Process::new(program.kernel().clone(), stack_inputs, advice_inputs, options)
183        .with_source_manager(source_manager);
184    let stack_outputs = process.execute(program, host)?;
185    let trace = ExecutionTrace::new(process, stack_outputs);
186    assert_eq!(&program.hash(), trace.program_hash(), "inconsistent program hash");
187    Ok(trace)
188}
189
190/// Returns an iterator which allows callers to step through the execution and inspect VM state at
191/// each execution step.
192pub fn execute_iter(
193    program: &Program,
194    stack_inputs: StackInputs,
195    advice_inputs: AdviceInputs,
196    host: &mut impl SyncHost,
197    source_manager: Arc<dyn SourceManager>,
198) -> VmStateIterator {
199    let mut process = Process::new_debug(program.kernel().clone(), stack_inputs, advice_inputs)
200        .with_source_manager(source_manager);
201    let result = process.execute(program, host);
202    if result.is_ok() {
203        assert_eq!(
204            program.hash(),
205            process.decoder.program_hash().into(),
206            "inconsistent program hash"
207        );
208    }
209    VmStateIterator::new(process, result)
210}
211
212// PROCESS
213// ================================================================================================
214
/// A [Process] is the underlying execution engine for a Miden [Program].
///
/// Typically, you do not need to worry about, or use [Process] directly, instead you should prefer
/// to use either [execute] or [execute_iter], which also handle setting up the process state,
/// inputs, as well as compute the [ExecutionTrace] for the program.
///
/// However, for situations in which you want finer-grained control over those steps, you will need
/// to construct an instance of [Process] using [Process::new], invoke [Process::execute], and then
/// get the execution trace using [ExecutionTrace::new] using the outputs produced by execution.
#[cfg(not(any(test, feature = "testing")))]
pub struct Process {
    // Advice provider built from the advice inputs supplied at construction.
    advice: AdviceProvider,
    // System state (clock, execution context, free-memory pointer, syscall flag).
    system: System,
    // Decoder component; also records assembly-op info when in debug mode.
    decoder: Decoder,
    // Operand stack.
    stack: Stack,
    // Range-checker component.
    range: RangeChecker,
    // Chiplets (includes memory and the kernel ROM).
    chiplets: Chiplets,
    // Maximum number of cycles the program is allowed to execute (from [ExecutionOptions]).
    max_cycles: u32,
    // When true, `Trace` decorators trigger the host's `on_trace` callback.
    enable_tracing: bool,
    // Source manager used to attach source-location context to errors.
    source_manager: Arc<dyn SourceManager>,
}
236
// Same layout as the non-testing [Process] above, but with all fields public so that tests (and
// crates enabling the "testing" feature) can inspect and manipulate internal state directly.
#[cfg(any(test, feature = "testing"))]
pub struct Process {
    pub advice: AdviceProvider,
    pub system: System,
    pub decoder: Decoder,
    pub stack: Stack,
    pub range: RangeChecker,
    pub chiplets: Chiplets,
    pub max_cycles: u32,
    pub enable_tracing: bool,
    pub source_manager: Arc<dyn SourceManager>,
}
249
impl Process {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------
    /// Creates a new process with the provided inputs.
    pub fn new(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        Self::initialize(kernel, stack_inputs, advice_inputs, execution_options)
    }

    /// Creates a new process with provided inputs and debug options enabled.
    pub fn new_debug(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
    ) -> Self {
        Self::initialize(
            kernel,
            stack_inputs,
            advice_inputs,
            ExecutionOptions::default().with_tracing().with_debugging(true),
        )
    }

    /// Shared constructor backing [Self::new] and [Self::new_debug].
    fn initialize(
        kernel: Kernel,
        stack: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        let in_debug_mode = execution_options.enable_debugging();
        // a default source manager is installed here; it can be replaced via
        // `with_source_manager()`
        let source_manager = Arc::new(DefaultSourceManager::default());
        Self {
            advice: advice_inputs.into(),
            system: System::new(execution_options.expected_cycles() as usize),
            decoder: Decoder::new(in_debug_mode),
            stack: Stack::new(&stack, execution_options.expected_cycles() as usize, in_debug_mode),
            range: RangeChecker::new(),
            chiplets: Chiplets::new(kernel),
            max_cycles: execution_options.max_cycles(),
            enable_tracing: execution_options.enable_tracing(),
            source_manager,
        }
    }

    /// Set the internal source manager to an externally initialized one.
    pub fn with_source_manager(mut self, source_manager: Arc<dyn SourceManager>) -> Self {
        self.source_manager = source_manager;
        self
    }

    // PROGRAM EXECUTOR
    // --------------------------------------------------------------------------------------------

    /// Executes the provided [`Program`] in this process.
    ///
    /// Returns the stack outputs on success.
    ///
    /// # Errors
    /// Returns an error if this process has already executed a program (i.e., its clock is
    /// non-zero), or if execution of the program itself fails.
    pub fn execute(
        &mut self,
        program: &Program,
        host: &mut impl SyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // a process is single-use: a non-zero clock means a program has already been executed
        if self.system.clk() != 0 {
            return Err(ExecutionError::ProgramAlreadyExecuted);
        }

        // make the program's advice map available to the advice provider
        // NOTE(review): the error is reported at row 0 with an empty error context (`&()`) —
        // confirm whether richer context should be attached here.
        self.advice
            .merge_advice_map(program.mast_forest().advice_map())
            .map_err(|err| ExecutionError::advice_error(err, RowIndex::from(0), &()))?;

        self.execute_mast_node(program.entrypoint(), &program.mast_forest().clone(), host)?;

        self.stack.build_stack_outputs()
    }

    // NODE EXECUTORS
    // --------------------------------------------------------------------------------------------

    /// Executes the MAST node with the specified ID from the provided forest.
    ///
    /// Decorators attached to the node are executed before entering and after exiting the node.
    /// External nodes are resolved (potentially via the host) to a root node in another forest,
    /// which is then executed in place of the external node.
    fn execute_mast_node(
        &mut self,
        node_id: MastNodeId,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let node = program
            .get_node_by_id(node_id)
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id })?;

        // execute any decorators attached to the node's entry point
        for &decorator_id in node.before_enter() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        match node {
            MastNode::Block(node) => self.execute_basic_block_node(node, program, host)?,
            MastNode::Join(node) => self.execute_join_node(node, program, host)?,
            MastNode::Split(node) => self.execute_split_node(node, program, host)?,
            MastNode::Loop(node) => self.execute_loop_node(node, program, host)?,
            MastNode::Call(node) => {
                // call/dyn errors from deeper external nodes may lack source context; attach it
                let err_ctx = err_ctx!(program, node, self.source_manager.clone());
                add_error_ctx_to_external_error(
                    self.execute_call_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::Dyn(node) => {
                let err_ctx = err_ctx!(program, node, self.source_manager.clone());
                add_error_ctx_to_external_error(
                    self.execute_dyn_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::External(external_node) => {
                // resolve the external node to a (root, forest) pair and execute it in place
                let (root_id, mast_forest) =
                    resolve_external_node(external_node, &mut self.advice, host)?;

                self.execute_mast_node(root_id, &mast_forest, host)?;
            },
        }

        // execute any decorators attached to the node's exit point
        for &decorator_id in node.after_exit() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the specified [JoinNode].
    #[inline(always)]
    fn execute_join_node(
        &mut self,
        node: &JoinNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_join_node(node, program, host)?;

        // execute first and then second child of the join block
        self.execute_mast_node(node.first(), program, host)?;
        self.execute_mast_node(node.second(), program, host)?;

        self.end_join_node(node, program, host)
    }

    /// Executes the specified [SplitNode].
    ///
    /// The top stack element selects the branch: ONE executes the true branch, ZERO the false
    /// branch; any other value is an error.
    #[inline(always)]
    fn execute_split_node(
        &mut self,
        node: &SplitNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // start the SPLIT block; this also pops the stack and returns the popped element
        let condition = self.start_split_node(node, program, host)?;

        // execute either the true or the false branch of the split block based on the condition
        if condition == ONE {
            self.execute_mast_node(node.on_true(), program, host)?;
        } else if condition == ZERO {
            self.execute_mast_node(node.on_false(), program, host)?;
        } else {
            let err_ctx = err_ctx!(program, node, self.source_manager.clone());
            return Err(ExecutionError::not_binary_value_if(condition, &err_ctx));
        }

        self.end_split_node(node, program, host)
    }

    /// Executes the specified [LoopNode].
    ///
    /// The loop body is executed as long as the condition on the top of the stack is ONE; the
    /// loop terminates when it is ZERO; any other value is an error.
    #[inline(always)]
    fn execute_loop_node(
        &mut self,
        node: &LoopNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // start the LOOP block; this also pops the stack and returns the popped element
        let condition = self.start_loop_node(node, program, host)?;

        // if the top of the stack is ONE, execute the loop body; otherwise skip the loop body
        if condition == ONE {
            // execute the loop body at least once
            self.execute_mast_node(node.body(), program, host)?;

            // keep executing the loop body until the condition on the top of the stack is no
            // longer ONE; each iteration of the loop is preceded by executing REPEAT operation
            // which drops the condition from the stack
            while self.stack.peek() == ONE {
                self.decoder.repeat();
                self.execute_op(Operation::Drop, program, host)?;
                self.execute_mast_node(node.body(), program, host)?;
            }

            // after the loop exits, the condition must be exactly ZERO (binary)
            if self.stack.peek() != ZERO {
                let err_ctx = err_ctx!(program, node, self.source_manager.clone());
                return Err(ExecutionError::not_binary_value_loop(self.stack.peek(), &err_ctx));
            }

            // end the LOOP block and drop the condition from the stack
            self.end_loop_node(node, true, program, host)
        } else if condition == ZERO {
            // end the LOOP block, but don't drop the condition from the stack because it was
            // already dropped when we started the LOOP block
            self.end_loop_node(node, false, program, host)
        } else {
            let err_ctx = err_ctx!(program, node, self.source_manager.clone());
            Err(ExecutionError::not_binary_value_loop(condition, &err_ctx))
        }
    }

    /// Executes the specified [CallNode].
    ///
    /// For syscalls, the call target must be a procedure in the kernel.
    #[inline(always)]
    fn execute_call_node(
        &mut self,
        call_node: &CallNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // call or syscall are not allowed inside a syscall
        if self.system.in_syscall() {
            let instruction = if call_node.is_syscall() { "syscall" } else { "call" };
            return Err(ExecutionError::CallInSyscall(instruction));
        }

        // if this is a syscall, make sure the call target exists in the kernel
        if call_node.is_syscall() {
            let callee = program.get_node_by_id(call_node.callee()).ok_or_else(|| {
                ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() }
            })?;
            let err_ctx = err_ctx!(program, call_node, self.source_manager.clone());
            self.chiplets.kernel_rom.access_proc(callee.digest(), &err_ctx)?;
        }
        let err_ctx = err_ctx!(program, call_node, self.source_manager.clone());

        self.start_call_node(call_node, program, host)?;
        self.execute_mast_node(call_node.callee(), program, host)?;
        self.end_call_node(call_node, program, host, &err_ctx)
    }

    /// Executes the specified [miden_core::mast::DynNode].
    ///
    /// The MAST root of the callee is assumed to be at the top of the stack, and the callee is
    /// expected to be either in the current `program` or in the host.
    #[inline(always)]
    fn execute_dyn_node(
        &mut self,
        node: &DynNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        // dyn calls are not allowed inside a syscall
        if node.is_dyncall() && self.system.in_syscall() {
            return Err(ExecutionError::CallInSyscall("dyncall"));
        }

        let err_ctx = err_ctx!(program, node, self.source_manager.clone());

        // starting the node pops the callee hash off the stack (dyncall additionally starts a
        // new execution context)
        let callee_hash = if node.is_dyncall() {
            self.start_dyncall_node(node, &err_ctx)?
        } else {
            self.start_dyn_node(node, program, host, &err_ctx)?
        };

        // if the callee is not in the program's MAST forest, try to find a MAST forest for it in
        // the host (corresponding to an external library loaded in the host); if none are
        // found, return an error.
        match program.find_procedure_root(callee_hash) {
            Some(callee_id) => self.execute_mast_node(callee_id, program, host)?,
            None => {
                let mast_forest = host
                    .get_mast_forest(&callee_hash)
                    .ok_or_else(|| ExecutionError::dynamic_node_not_found(callee_hash, &err_ctx))?;

                // We limit the parts of the program that can be called externally to procedure
                // roots, even though MAST doesn't have that restriction.
                // NOTE(review): an empty error context (`&()`) is used here even though `err_ctx`
                // is available above — confirm whether context should be attached.
                let root_id = mast_forest
                    .find_procedure_root(callee_hash)
                    .ok_or(ExecutionError::malfored_mast_forest_in_host(callee_hash, &()))?;

                self.execute_mast_node(root_id, &mast_forest, host)?
            },
        }

        if node.is_dyncall() {
            self.end_dyncall_node(node, program, host, &err_ctx)
        } else {
            self.end_dyn_node(node, program, host)
        }
    }

    /// Executes the specified [BasicBlockNode].
    #[inline(always)]
    fn execute_basic_block_node(
        &mut self,
        basic_block: &BasicBlockNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_basic_block_node(basic_block, program, host)?;

        // `op_offset` tracks the absolute index of the first op of the current batch within the
        // basic block; it is used to match decorators against their anchor operations
        let mut op_offset = 0;
        let mut decorator_ids = basic_block.decorator_iter();

        // execute the first operation batch
        self.execute_op_batch(
            basic_block,
            &basic_block.op_batches()[0],
            &mut decorator_ids,
            op_offset,
            program,
            host,
        )?;
        op_offset += basic_block.op_batches()[0].ops().len();

        // if the span contains more operation batches, execute them. each additional batch is
        // preceded by a RESPAN operation; executing RESPAN operation does not change the state
        // of the stack
        for op_batch in basic_block.op_batches().iter().skip(1) {
            self.respan(op_batch);
            self.execute_op(Operation::Noop, program, host)?;
            self.execute_op_batch(
                basic_block,
                op_batch,
                &mut decorator_ids,
                op_offset,
                program,
                host,
            )?;
            op_offset += op_batch.ops().len();
        }

        self.end_basic_block_node(basic_block, program, host)?;

        // execute any decorators which have not been executed during span ops execution; this
        // can happen for decorators appearing after all operations in a block. these decorators
        // are executed after SPAN block is closed to make sure the VM clock cycle advances beyond
        // the last clock cycle of the SPAN block ops.
        for &decorator_id in decorator_ids {
            let decorator = program
                .get_decorator_by_id(decorator_id)
                .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
            self.execute_decorator(decorator, host)?;
        }

        Ok(())
    }

    /// Executes all operations in an [OpBatch]. This also ensures that all alignment rules are
    /// satisfied by executing NOOPs as needed. Specifically:
    /// - If an operation group ends with an operation carrying an immediate value, a NOOP is
    ///   executed after it.
    /// - If the number of groups in a batch is not a power of 2, NOOPs are executed (one per group)
    ///   to bring it up to the next power of two (e.g., 3 -> 4, 5 -> 8).
    #[inline(always)]
    fn execute_op_batch(
        &mut self,
        basic_block: &BasicBlockNode,
        batch: &OpBatch,
        decorators: &mut DecoratorIterator,
        op_offset: usize,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let op_counts = batch.op_counts();
        let mut op_idx = 0;
        let mut group_idx = 0;
        let mut next_group_idx = 1;

        // round up the number of groups to be processed to the next power of two; we do this
        // because the processor requires the number of groups to be either 1, 2, 4, or 8; if
        // the actual number of groups is smaller, we'll pad the batch with NOOPs at the end
        let num_batch_groups = batch.num_groups().next_power_of_two();

        // execute operations in the batch one by one
        for (i, &op) in batch.ops().iter().enumerate() {
            // execute any decorators anchored at this absolute op index before the op itself
            while let Some(&decorator_id) = decorators.next_filtered(i + op_offset) {
                let decorator = program
                    .get_decorator_by_id(decorator_id)
                    .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
                self.execute_decorator(decorator, host)?;
            }

            // decode and execute the operation
            let err_ctx =
                err_ctx!(program, basic_block, self.source_manager.clone(), i + op_offset);
            self.decoder.execute_user_op(op, op_idx);
            self.execute_op_with_error_ctx(op, program, host, &err_ctx)?;

            // if the operation carries an immediate value, the value is stored at the next group
            // pointer; so, we advance the pointer to the following group
            let has_imm = op.imm_value().is_some();
            if has_imm {
                next_group_idx += 1;
            }

            // determine if we've executed all non-decorator operations in a group
            if op_idx == op_counts[group_idx] - 1 {
                // if we are at the end of the group, first check if the operation carries an
                // immediate value
                if has_imm {
                    // an operation with an immediate value cannot be the last operation in a group
                    // so, we need to execute a NOOP after it. the assert also makes sure that
                    // there is enough room in the group to execute a NOOP (if there isn't, there
                    // is a bug somewhere in the assembler)
                    debug_assert!(op_idx < OP_GROUP_SIZE - 1, "invalid op index");
                    self.decoder.execute_user_op(Operation::Noop, op_idx + 1);
                    self.execute_op(Operation::Noop, program, host)?;
                }

                // then, move to the next group and reset operation index
                group_idx = next_group_idx;
                next_group_idx += 1;
                op_idx = 0;

                // if we haven't reached the end of the batch yet, set up the decoder for
                // decoding the next operation group
                if group_idx < num_batch_groups {
                    self.decoder.start_op_group(batch.groups()[group_idx]);
                }
            } else {
                // if we are not at the end of the group, just increment the operation index
                op_idx += 1;
            }
        }

        // make sure we execute the required number of operation groups; this would happen when
        // the actual number of operation groups was not a power of two
        for group_idx in group_idx..num_batch_groups {
            self.decoder.execute_user_op(Operation::Noop, 0);
            self.execute_op(Operation::Noop, program, host)?;

            // if we are not at the last group yet, set up the decoder for decoding the next
            // operation groups. the groups we are processing are just NOOPs - so, the op group
            // value is ZERO
            if group_idx < num_batch_groups - 1 {
                self.decoder.start_op_group(ZERO);
            }
        }

        Ok(())
    }

    /// Executes the specified decorator.
    ///
    /// Debug decorators invoke the host's `on_debug` callback only when the decoder is in debug
    /// mode; AsmOp decorators are recorded by the decoder in debug mode; Trace decorators invoke
    /// the host's `on_trace` callback only when tracing is enabled.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.decoder.in_debug_mode() {
                    let process = &mut self.state();
                    host.on_debug(process, options)?;
                }
            },
            Decorator::AsmOp(assembly_op) => {
                if self.decoder.in_debug_mode() {
                    self.decoder.append_asmop(self.system.clk(), assembly_op.clone());
                }
            },
            Decorator::Trace(id) => {
                if self.enable_tracing {
                    let process = &mut self.state();
                    host.on_trace(process, *id)?;
                }
            },
        };
        Ok(())
    }

    // PUBLIC ACCESSORS
    // ================================================================================================

    /// Returns a reference to the kernel this process was instantiated with.
    pub const fn kernel(&self) -> &Kernel {
        self.chiplets.kernel_rom.kernel()
    }

    /// Consumes the process and returns its constituent components.
    pub fn into_parts(self) -> (System, Decoder, Stack, RangeChecker, Chiplets) {
        (self.system, self.decoder, self.stack, self.range, self.chiplets)
    }
}
731
/// Borrowed view over the components of a [Process] needed to service host callbacks; see
/// [ProcessState].
#[derive(Debug)]
pub struct SlowProcessState<'a> {
    // mutable: host callbacks may read from and write to the advice provider
    advice: &'a mut AdviceProvider,
    system: &'a System,
    stack: &'a Stack,
    chiplets: &'a Chiplets,
}
739
740// PROCESS STATE
741// ================================================================================================
742
/// A view into the state of either the slow (trace-generating) processor or the fast processor,
/// exposing a common set of read accessors plus mutable access to the advice provider.
#[derive(Debug)]
pub enum ProcessState<'a> {
    Slow(SlowProcessState<'a>),
    Fast(FastProcessState<'a>),
}
748
impl Process {
    /// Returns a [ProcessState] view over this process, for passing to host callbacks.
    #[inline(always)]
    fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Slow(SlowProcessState {
            advice: &mut self.advice,
            system: &self.system,
            stack: &self.stack,
            chiplets: &self.chiplets,
        })
    }
}
760
impl<'a> ProcessState<'a> {
    /// Returns a reference to the advice provider.
    #[inline(always)]
    pub fn advice_provider(&self) -> &AdviceProvider {
        match self {
            ProcessState::Slow(state) => state.advice,
            ProcessState::Fast(state) => &state.processor.advice,
        }
    }

    /// Returns a mutable reference to the advice provider.
    #[inline(always)]
    pub fn advice_provider_mut(&mut self) -> &mut AdviceProvider {
        match self {
            ProcessState::Slow(state) => state.advice,
            ProcessState::Fast(state) => &mut state.processor.advice,
        }
    }

    /// Returns the current clock cycle of a process.
    #[inline(always)]
    pub fn clk(&self) -> RowIndex {
        match self {
            ProcessState::Slow(state) => state.system.clk(),
            // the fast processor tracks the clock per batch; the op index within the batch is
            // added to obtain the cycle of the current operation
            ProcessState::Fast(state) => state.processor.clk + state.op_idx,
        }
    }

    /// Returns the current execution context ID.
    #[inline(always)]
    pub fn ctx(&self) -> ContextId {
        match self {
            ProcessState::Slow(state) => state.system.ctx(),
            ProcessState::Fast(state) => state.processor.ctx,
        }
    }

    /// Returns the current value of the free memory pointer.
    #[inline(always)]
    pub fn fmp(&self) -> u64 {
        match self {
            ProcessState::Slow(state) => state.system.fmp().as_int(),
            ProcessState::Fast(state) => state.processor.fmp.as_int(),
        }
    }

    /// Returns the value located at the specified position on the stack at the current clock cycle.
    #[inline(always)]
    pub fn get_stack_item(&self, pos: usize) -> Felt {
        match self {
            ProcessState::Slow(state) => state.stack.get(pos),
            ProcessState::Fast(state) => state.processor.stack_get(pos),
        }
    }

    /// Returns a word located at the specified word index on the stack.
    ///
    /// Specifically, word 0 is defined by the first 4 elements of the stack, word 1 is defined
    /// by the next 4 elements etc. Since the top 16 elements of the stack form 4 words, the
    /// highest valid word index is 3.
    ///
    /// The words are created in reverse order. For example, for word 0 the top element of the
    /// stack will be at the last position in the word.
    ///
    /// Creating a word does not change the state of the stack.
    #[inline(always)]
    pub fn get_stack_word(&self, word_idx: usize) -> Word {
        match self {
            ProcessState::Slow(state) => state.stack.get_word(word_idx),
            // the fast processor addresses the stack by element, so the word index is scaled
            ProcessState::Fast(state) => state.processor.stack_get_word(word_idx * WORD_SIZE),
        }
    }

    /// Returns stack state at the current clock cycle. This includes the top 16 items of the
    /// stack + overflow entries.
    #[inline(always)]
    pub fn get_stack_state(&self) -> Vec<Felt> {
        match self {
            ProcessState::Slow(state) => state.stack.get_state_at(state.system.clk()),
            // the fast processor stores the stack in reverse order relative to this API
            ProcessState::Fast(state) => state.processor.stack().iter().rev().copied().collect(),
        }
    }

    /// Returns the element located at the specified context/address, or None if the address hasn't
    /// been accessed previously.
    #[inline(always)]
    pub fn get_mem_value(&self, ctx: ContextId, addr: u32) -> Option<Felt> {
        match self {
            ProcessState::Slow(state) => state.chiplets.memory.get_value(ctx, addr),
            ProcessState::Fast(state) => state.processor.memory.read_element_impl(ctx, addr),
        }
    }

    /// Returns the batch of elements starting at the specified context/address.
    ///
    /// # Errors
    /// - If the address is not word aligned.
    #[inline(always)]
    pub fn get_mem_word(&self, ctx: ContextId, addr: u32) -> Result<Option<Word>, MemoryError> {
        match self {
            ProcessState::Slow(state) => state.chiplets.memory.get_word(ctx, addr),
            ProcessState::Fast(state) => {
                state.processor.memory.read_word_impl(ctx, addr, None, &())
            },
        }
    }

    /// Returns the entire memory state for the specified execution context at the current clock
    /// cycle.
    ///
    /// The state is returned as a vector of (address, value) tuples, and includes addresses which
    /// have been accessed at least once.
    #[inline(always)]
    pub fn get_mem_state(&self, ctx: ContextId) -> Vec<(MemoryAddress, Felt)> {
        match self {
            ProcessState::Slow(state) => {
                state.chiplets.memory.get_state_at(ctx, state.system.clk())
            },
            ProcessState::Fast(state) => state.processor.memory.get_memory_state(ctx),
        }
    }
}
883
884// HELPERS
885// ================================================================================================
886
887/// For errors generated from processing an `ExternalNode`, returns the same error except with
888/// proper error context.
889pub(crate) fn add_error_ctx_to_external_error(
890    result: Result<(), ExecutionError>,
891    err_ctx: impl ErrorContext,
892) -> Result<(), ExecutionError> {
893    match result {
894        Ok(_) => Ok(()),
895        // Add context information to any errors coming from executing an `ExternalNode`
896        Err(err) => match err {
897            ExecutionError::NoMastForestWithProcedure { label, source_file: _, root_digest }
898            | ExecutionError::MalformedMastForestInHost { label, source_file: _, root_digest } => {
899                if label == SourceSpan::UNKNOWN {
900                    let err_with_ctx =
901                        ExecutionError::no_mast_forest_with_procedure(root_digest, &err_ctx);
902                    Err(err_with_ctx)
903                } else {
904                    // If the source span was already populated, just return the error as-is. This
905                    // would occur when a call deeper down the call stack was responsible for the
906                    // error.
907                    Err(err)
908                }
909            },
910
911            _ => {
912                // do nothing
913                Err(err)
914            },
915        },
916    }
917}