miden_processor/
lib.rs

#![no_std]

#[macro_use]
extern crate alloc;

#[cfg(feature = "std")]
extern crate std;

use alloc::{sync::Arc, vec::Vec};
use core::fmt::{Display, LowerHex};

use fast::FastProcessor;
use miden_air::trace::{
    CHIPLETS_WIDTH, DECODER_TRACE_WIDTH, MIN_TRACE_LEN, RANGE_CHECK_TRACE_WIDTH, STACK_TRACE_WIDTH,
    SYS_TRACE_WIDTH,
};
pub use miden_air::{ExecutionOptions, ExecutionOptionsError, RowIndex};
use utils::resolve_external_node;
pub use vm_core::{
    AssemblyOp, EMPTY_WORD, Felt, Kernel, ONE, Operation, Program, ProgramInfo, QuadExtension,
    StackInputs, StackOutputs, Word, ZERO,
    chiplets::hasher::Digest,
    crypto::merkle::SMT_DEPTH,
    debuginfo::{DefaultSourceManager, SourceManager, SourceSpan},
    errors::InputError,
    mast::{MastForest, MastNode, MastNodeId},
    sys_events::SystemEvent,
    utils::{DeserializationError, collections::KvMap},
};
use vm_core::{
    Decorator, DecoratorIterator, FieldElement, WORD_SIZE,
    mast::{
        BasicBlockNode, CallNode, DynNode, JoinNode, LoopNode, MastNodeExt, OP_GROUP_SIZE, OpBatch,
        SplitNode,
    },
};
pub use winter_prover::matrix::ColMatrix;

pub mod fast;

mod operations;

mod system;
use system::System;
pub use system::{ContextId, FMP_MIN, SYSCALL_FMP_MIN};

mod decoder;
use decoder::Decoder;

mod stack;
use stack::Stack;

mod range;
use range::RangeChecker;

mod host;
pub use host::{
    DefaultHost, Host, MastForestStore, MemMastForestStore,
    advice::{AdviceInputs, AdviceProvider, AdviceSource, MemAdviceProvider, RecAdviceProvider},
};

mod chiplets;
use chiplets::Chiplets;
pub use chiplets::MemoryError;

mod trace;
use trace::TraceFragment;
pub use trace::{ChipletsLengths, ExecutionTrace, NUM_RAND_ROWS, TraceLenSummary};

mod errors;
pub use errors::{ErrorContext, ExecutionError, Ext2InttError};

pub mod utils;

#[cfg(test)]
mod tests;

mod debug;
pub use debug::{AsmOpInfo, VmState, VmStateIterator};

// RE-EXPORTS
// ================================================================================================

pub mod math {
    pub use vm_core::{Felt, FieldElement, StarkField};
    pub use winter_prover::math::fft;
}

pub mod crypto {
    pub use vm_core::crypto::{
        hash::{
            Blake3_192, Blake3_256, ElementHasher, Hasher, Rpo256, RpoDigest, Rpx256, RpxDigest,
        },
        merkle::{
            MerkleError, MerklePath, MerkleStore, MerkleTree, NodeIndex, PartialMerkleTree,
            SimpleSmt,
        },
        random::{RandomCoin, RpoRandomCoin, RpxRandomCoin, WinterRandomCoin},
    };
}

// TYPE ALIASES
// ================================================================================================

type QuadFelt = QuadExtension<Felt>;

/// A strongly-typed wrapper around a `u32` VM memory address.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct MemoryAddress(u32);

impl From<u32> for MemoryAddress {
    fn from(addr: u32) -> Self {
        MemoryAddress(addr)
    }
}

impl From<MemoryAddress> for u32 {
    fn from(value: MemoryAddress) -> Self {
        value.0
    }
}

impl Display for MemoryAddress {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        Display::fmt(&self.0, f)
    }
}

impl LowerHex for MemoryAddress {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        LowerHex::fmt(&self.0, f)
    }
}

impl core::ops::Add<MemoryAddress> for MemoryAddress {
    type Output = Self;

    fn add(self, rhs: MemoryAddress) -> Self::Output {
        MemoryAddress(self.0 + rhs.0)
    }
}

impl core::ops::Add<u32> for MemoryAddress {
    type Output = Self;

    fn add(self, rhs: u32) -> Self::Output {
        MemoryAddress(self.0 + rhs)
    }
}

/// Columns of the system component of the execution trace.
type SysTrace = [Vec<Felt>; SYS_TRACE_WIDTH];

/// Columns of the decoder component of the execution trace, together with the builder for the
/// decoder's auxiliary trace columns.
pub struct DecoderTrace {
    trace: [Vec<Felt>; DECODER_TRACE_WIDTH],
    aux_builder: decoder::AuxTraceBuilder,
}

/// Columns of the stack component of the execution trace.
pub struct StackTrace {
    trace: [Vec<Felt>; STACK_TRACE_WIDTH],
}

/// Columns of the range checker component of the execution trace, together with the builder for
/// the range checker's auxiliary trace columns.
pub struct RangeCheckTrace {
    trace: [Vec<Felt>; RANGE_CHECK_TRACE_WIDTH],
    aux_builder: range::AuxTraceBuilder,
}

/// Columns of the chiplets component of the execution trace, together with the builder for the
/// chiplets' auxiliary trace columns.
pub struct ChipletsTrace {
    trace: [Vec<Felt>; CHIPLETS_WIDTH],
    aux_builder: chiplets::AuxTraceBuilder,
}

// EXECUTORS
// ================================================================================================

/// Returns an execution trace resulting from executing the provided program against the provided
/// inputs.
///
/// The `host` parameter is used to provide the external environment to the program being executed,
/// such as access to the advice provider and libraries that the program depends on.
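///
/// # Example
///
/// A minimal usage sketch (marked `ignore` because assembling `program` requires the Miden
/// assembler, which lives outside this crate):
///
/// ```ignore
/// use std::sync::Arc;
///
/// use miden_processor::{DefaultHost, DefaultSourceManager, ExecutionOptions, StackInputs, execute};
///
/// // `program` is assumed to have been assembled elsewhere.
/// let mut host = DefaultHost::default();
/// let trace = execute(
///     &program,
///     StackInputs::default(),
///     &mut host,
///     ExecutionOptions::default(),
///     Arc::new(DefaultSourceManager::default()),
/// )?;
/// assert_eq!(trace.program_hash(), &program.hash());
/// ```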
#[tracing::instrument("execute_program", skip_all)]
pub fn execute(
    program: &Program,
    stack_inputs: StackInputs,
    host: &mut impl Host,
    options: ExecutionOptions,
    source_manager: Arc<dyn SourceManager>,
) -> Result<ExecutionTrace, ExecutionError> {
    let mut process = Process::new(program.kernel().clone(), stack_inputs, options)
        .with_source_manager(source_manager);
    let stack_outputs = process.execute(program, host)?;
    let trace = ExecutionTrace::new(process, stack_outputs);
    assert_eq!(&program.hash(), trace.program_hash(), "inconsistent program hash");
    Ok(trace)
}

/// Returns an iterator which allows callers to step through the execution and inspect VM state at
/// each execution step.
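///
/// A sketch of stepping through a program (assuming `program`, `host`, and `source_manager` are
/// set up as for [execute]):
///
/// ```ignore
/// for vm_state in execute_iter(&program, stack_inputs, &mut host, source_manager) {
///     let vm_state = vm_state?;
///     // inspect the VM state (clock cycle, operation, stack, etc.) at this step
/// }
/// ```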
pub fn execute_iter(
    program: &Program,
    stack_inputs: StackInputs,
    host: &mut impl Host,
    source_manager: Arc<dyn SourceManager>,
) -> VmStateIterator {
    let mut process = Process::new_debug(program.kernel().clone(), stack_inputs)
        .with_source_manager(source_manager);
    let result = process.execute(program, host);
    if result.is_ok() {
        assert_eq!(
            program.hash(),
            process.decoder.program_hash().into(),
            "inconsistent program hash"
        );
    }
    VmStateIterator::new(process, result)
}

// PROCESS
// ================================================================================================

/// A [Process] is the underlying execution engine for a Miden [Program].
///
/// Typically, you do not need to worry about, or use, [Process] directly; instead, you should
/// prefer either [execute] or [execute_iter], which also handle setting up the process state and
/// inputs, as well as computing the [ExecutionTrace] for the program.
///
/// However, for situations in which you want finer-grained control over those steps, you can
/// construct an instance of [Process] using [Process::new], invoke [Process::execute], and then
/// build the execution trace using [ExecutionTrace::new] with the outputs produced by execution.
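///
/// A sketch of that flow (mirroring what [execute] does internally):
///
/// ```ignore
/// let mut process = Process::new(program.kernel().clone(), stack_inputs, options)
///     .with_source_manager(source_manager);
/// let stack_outputs = process.execute(&program, &mut host)?;
/// let trace = ExecutionTrace::new(process, stack_outputs);
/// ```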
#[cfg(not(any(test, feature = "testing")))]
pub struct Process {
    system: System,
    decoder: Decoder,
    stack: Stack,
    range: RangeChecker,
    chiplets: Chiplets,
    max_cycles: u32,
    enable_tracing: bool,
    source_manager: Arc<dyn SourceManager>,
}

#[cfg(any(test, feature = "testing"))]
pub struct Process {
    pub system: System,
    pub decoder: Decoder,
    pub stack: Stack,
    pub range: RangeChecker,
    pub chiplets: Chiplets,
    pub max_cycles: u32,
    pub enable_tracing: bool,
    pub source_manager: Arc<dyn SourceManager>,
}

impl Process {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------
    /// Creates a new process with the provided inputs.
    pub fn new(
        kernel: Kernel,
        stack_inputs: StackInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        Self::initialize(kernel, stack_inputs, execution_options)
    }

    /// Creates a new process with the provided inputs and debug options enabled.
    pub fn new_debug(kernel: Kernel, stack_inputs: StackInputs) -> Self {
        Self::initialize(
            kernel,
            stack_inputs,
            ExecutionOptions::default().with_tracing().with_debugging(true),
        )
    }

    fn initialize(kernel: Kernel, stack: StackInputs, execution_options: ExecutionOptions) -> Self {
        let in_debug_mode = execution_options.enable_debugging();
        let source_manager = Arc::new(DefaultSourceManager::default());
        Self {
            system: System::new(execution_options.expected_cycles() as usize),
            decoder: Decoder::new(in_debug_mode),
            stack: Stack::new(&stack, execution_options.expected_cycles() as usize, in_debug_mode),
            range: RangeChecker::new(),
            chiplets: Chiplets::new(kernel),
            max_cycles: execution_options.max_cycles(),
            enable_tracing: execution_options.enable_tracing(),
            source_manager,
        }
    }

    /// Sets the internal source manager to an externally initialized one.
    pub fn with_source_manager(mut self, source_manager: Arc<dyn SourceManager>) -> Self {
        self.source_manager = source_manager;
        self
    }

    // PROGRAM EXECUTOR
    // --------------------------------------------------------------------------------------------

    /// Executes the provided [`Program`] in this process.
    pub fn execute(
        &mut self,
        program: &Program,
        host: &mut impl Host,
    ) -> Result<StackOutputs, ExecutionError> {
        if self.system.clk() != 0 {
            return Err(ExecutionError::ProgramAlreadyExecuted);
        }

        // Load the program's advice data into the advice provider
        for (digest, values) in program.mast_forest().advice_map().iter() {
            if let Some(stored_values) = host.advice_provider().get_mapped_values(digest) {
                if stored_values != values {
                    return Err(ExecutionError::AdviceMapKeyAlreadyPresent {
                        key: digest.into(),
                        prev_values: stored_values.to_vec(),
                        new_values: values.clone(),
                    });
                }
            } else {
                host.advice_provider_mut().insert_into_map(digest.into(), values.clone());
            }
        }

        self.execute_mast_node(program.entrypoint(), &program.mast_forest().clone(), host)?;

        self.stack.build_stack_outputs()
    }

    // NODE EXECUTORS
    // --------------------------------------------------------------------------------------------

    fn execute_mast_node(
        &mut self,
        node_id: MastNodeId,
        program: &MastForest,
        host: &mut impl Host,
    ) -> Result<(), ExecutionError> {
        let node = program
            .get_node_by_id(node_id)
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id })?;

        for &decorator_id in node.before_enter() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        match node {
            MastNode::Block(node) => self.execute_basic_block_node(node, program, host)?,
            MastNode::Join(node) => self.execute_join_node(node, program, host)?,
            MastNode::Split(node) => self.execute_split_node(node, program, host)?,
            MastNode::Loop(node) => self.execute_loop_node(node, program, host)?,
            MastNode::Call(node) => {
                let err_ctx = ErrorContext::new(program, node, self.source_manager.clone());
                add_error_ctx_to_external_error(
                    self.execute_call_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::Dyn(node) => {
                let err_ctx = ErrorContext::new(program, node, self.source_manager.clone());
                add_error_ctx_to_external_error(
                    self.execute_dyn_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::External(external_node) => {
                let (root_id, mast_forest) = resolve_external_node(external_node, host)?;

                self.execute_mast_node(root_id, &mast_forest, host)?;
            },
        }

        for &decorator_id in node.after_exit() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the specified [JoinNode].
    #[inline(always)]
    fn execute_join_node(
        &mut self,
        node: &JoinNode,
        program: &MastForest,
        host: &mut impl Host,
    ) -> Result<(), ExecutionError> {
        self.start_join_node(node, program, host)?;

        // execute first and then second child of the join block
        self.execute_mast_node(node.first(), program, host)?;
        self.execute_mast_node(node.second(), program, host)?;

        self.end_join_node(node, program, host)
    }

    /// Executes the specified [SplitNode].
    #[inline(always)]
    fn execute_split_node(
        &mut self,
        node: &SplitNode,
        program: &MastForest,
        host: &mut impl Host,
    ) -> Result<(), ExecutionError> {
        // start the SPLIT block; this also pops the stack and returns the popped element
        let condition = self.start_split_node(node, program, host)?;

        // execute either the true or the false branch of the split block based on the condition
        if condition == ONE {
            self.execute_mast_node(node.on_true(), program, host)?;
        } else if condition == ZERO {
            self.execute_mast_node(node.on_false(), program, host)?;
        } else {
            let err_ctx = ErrorContext::new(program, node, self.source_manager.clone());
            return Err(ExecutionError::not_binary_value_if(condition, &err_ctx));
        }

        self.end_split_node(node, program, host)
    }

    /// Executes the specified [LoopNode].
    #[inline(always)]
    fn execute_loop_node(
        &mut self,
        node: &LoopNode,
        program: &MastForest,
        host: &mut impl Host,
    ) -> Result<(), ExecutionError> {
        // start the LOOP block; this also pops the stack and returns the popped element
        let condition = self.start_loop_node(node, program, host)?;

        // if the top of the stack is ONE, execute the loop body; otherwise skip the loop body
        if condition == ONE {
            // execute the loop body at least once
            self.execute_mast_node(node.body(), program, host)?;

            // keep executing the loop body until the condition on the top of the stack is no
            // longer ONE; each iteration of the loop is preceded by executing a REPEAT operation,
            // which drops the condition from the stack
            while self.stack.peek() == ONE {
                self.decoder.repeat();
                self.execute_op(Operation::Drop, program, host)?;
                self.execute_mast_node(node.body(), program, host)?;
            }

            if self.stack.peek() != ZERO {
                let err_ctx = ErrorContext::new(program, node, self.source_manager.clone());
                return Err(ExecutionError::not_binary_value_loop(self.stack.peek(), &err_ctx));
            }

            // end the LOOP block and drop the condition from the stack
            self.end_loop_node(node, true, program, host)
        } else if condition == ZERO {
            // end the LOOP block, but don't drop the condition from the stack because it was
            // already dropped when we started the LOOP block
            self.end_loop_node(node, false, program, host)
        } else {
            let err_ctx = ErrorContext::new(program, node, self.source_manager.clone());
            Err(ExecutionError::not_binary_value_loop(condition, &err_ctx))
        }
    }

    /// Executes the specified [CallNode].
    #[inline(always)]
    fn execute_call_node(
        &mut self,
        call_node: &CallNode,
        program: &MastForest,
        host: &mut impl Host,
    ) -> Result<(), ExecutionError> {
        // call or syscall are not allowed inside a syscall
        if self.system.in_syscall() {
            let instruction = if call_node.is_syscall() { "syscall" } else { "call" };
            return Err(ExecutionError::CallInSyscall(instruction));
        }

        // if this is a syscall, make sure the call target exists in the kernel
        if call_node.is_syscall() {
            let callee = program.get_node_by_id(call_node.callee()).ok_or_else(|| {
                ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() }
            })?;
            let err_ctx = ErrorContext::new(program, call_node, self.source_manager.clone());
            self.chiplets.kernel_rom.access_proc(callee.digest(), &err_ctx)?;
        }
        let err_ctx = ErrorContext::new(program, call_node, self.source_manager.clone());

        self.start_call_node(call_node, program, host)?;
        self.execute_mast_node(call_node.callee(), program, host)?;
        self.end_call_node(call_node, program, host, &err_ctx)
    }

    /// Executes the specified [vm_core::mast::DynNode].
    ///
    /// The MAST root of the callee is assumed to be at the top of the stack, and the callee is
    /// expected to be either in the current `program` or in the host.
    #[inline(always)]
    fn execute_dyn_node(
        &mut self,
        node: &DynNode,
        program: &MastForest,
        host: &mut impl Host,
    ) -> Result<(), ExecutionError> {
        // dyn calls are not allowed inside a syscall
        if node.is_dyncall() && self.system.in_syscall() {
            return Err(ExecutionError::CallInSyscall("dyncall"));
        }

        let error_ctx = ErrorContext::new(program, node, self.source_manager.clone());

        let callee_hash = if node.is_dyncall() {
            self.start_dyncall_node(node, &error_ctx)?
        } else {
            self.start_dyn_node(node, program, host, &error_ctx)?
        };

        // if the callee is not in the program's MAST forest, try to find a MAST forest for it in
        // the host (corresponding to an external library loaded in the host); if none are
        // found, return an error.
        match program.find_procedure_root(callee_hash.into()) {
            Some(callee_id) => self.execute_mast_node(callee_id, program, host)?,
            None => {
                let mast_forest = host.get_mast_forest(&callee_hash.into()).ok_or_else(|| {
                    ExecutionError::dynamic_node_not_found(callee_hash.into(), &error_ctx)
                })?;

                // We limit the parts of the program that can be called externally to procedure
                // roots, even though MAST doesn't have that restriction.
                let root_id = mast_forest.find_procedure_root(callee_hash.into()).ok_or(
                    ExecutionError::malfored_mast_forest_in_host(
                        callee_hash.into(),
                        &ErrorContext::default(),
                    ),
                )?;

                self.execute_mast_node(root_id, &mast_forest, host)?
            },
        }

        if node.is_dyncall() {
            self.end_dyncall_node(node, program, host, &error_ctx)
        } else {
            self.end_dyn_node(node, program, host)
        }
    }

    /// Executes the specified [BasicBlockNode].
    #[inline(always)]
    fn execute_basic_block_node(
        &mut self,
        basic_block: &BasicBlockNode,
        program: &MastForest,
        host: &mut impl Host,
    ) -> Result<(), ExecutionError> {
        self.start_basic_block_node(basic_block, program, host)?;

        let mut op_offset = 0;
        let mut decorator_ids = basic_block.decorator_iter();

        // execute the first operation batch
        self.execute_op_batch(
            basic_block,
            &basic_block.op_batches()[0],
            &mut decorator_ids,
            op_offset,
            program,
            host,
        )?;
        op_offset += basic_block.op_batches()[0].ops().len();

        // if the span contains more operation batches, execute them. each additional batch is
        // preceded by a RESPAN operation; executing a RESPAN operation does not change the state
        // of the stack
        for op_batch in basic_block.op_batches().iter().skip(1) {
            self.respan(op_batch);
            self.execute_op(Operation::Noop, program, host)?;
            self.execute_op_batch(
                basic_block,
                op_batch,
                &mut decorator_ids,
                op_offset,
                program,
                host,
            )?;
            op_offset += op_batch.ops().len();
        }

        self.end_basic_block_node(basic_block, program, host)?;

        // execute any decorators which have not been executed during span ops execution; this
        // can happen for decorators appearing after all operations in a block. these decorators
        // are executed after the SPAN block is closed to make sure the VM clock cycle advances
        // beyond the last clock cycle of the SPAN block ops.
        for &decorator_id in decorator_ids {
            let decorator = program
                .get_decorator_by_id(decorator_id)
                .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
            self.execute_decorator(decorator, host)?;
        }

        Ok(())
    }

    /// Executes all operations in an [OpBatch]. This also ensures that all alignment rules are
    /// satisfied by executing NOOPs as needed. Specifically:
    /// - If an operation group ends with an operation carrying an immediate value, a NOOP is
    ///   executed after it.
    /// - If the number of groups in a batch is not a power of 2, NOOPs are executed (one per group)
    ///   to bring it up to the next power of two (e.g., 3 -> 4, 5 -> 8).
    #[inline(always)]
    fn execute_op_batch(
        &mut self,
        basic_block: &BasicBlockNode,
        batch: &OpBatch,
        decorators: &mut DecoratorIterator,
        op_offset: usize,
        program: &MastForest,
        host: &mut impl Host,
    ) -> Result<(), ExecutionError> {
        let op_counts = batch.op_counts();
        let mut op_idx = 0;
        let mut group_idx = 0;
        let mut next_group_idx = 1;

        // round up the number of groups to be processed to the next power of two; we do this
        // because the processor requires the number of groups to be either 1, 2, 4, or 8; if
        // the actual number of groups is smaller, we'll pad the batch with NOOPs at the end
        let num_batch_groups = batch.num_groups().next_power_of_two();

        // execute operations in the batch one by one
        for (i, &op) in batch.ops().iter().enumerate() {
            while let Some(&decorator_id) = decorators.next_filtered(i + op_offset) {
                let decorator = program
                    .get_decorator_by_id(decorator_id)
                    .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
                self.execute_decorator(decorator, host)?;
            }

            // decode and execute the operation
            let error_ctx = ErrorContext::new_with_op_idx(
                program,
                basic_block,
                self.source_manager.clone(),
                i + op_offset,
            );
            self.decoder.execute_user_op(op, op_idx);
            self.execute_op_with_error_ctx(op, program, host, &error_ctx)?;

            // if the operation carries an immediate value, the value is stored at the next group
            // pointer; so, we advance the pointer to the following group
            let has_imm = op.imm_value().is_some();
            if has_imm {
                next_group_idx += 1;
            }

            // determine if we've executed all non-decorator operations in a group
            if op_idx == op_counts[group_idx] - 1 {
                // if we are at the end of the group, first check if the operation carries an
                // immediate value
                if has_imm {
                    // an operation with an immediate value cannot be the last operation in a group,
                    // so we need to execute a NOOP after it. the assert also makes sure that there
                    // is enough room in the group to execute a NOOP (if there isn't, there is a
                    // bug somewhere in the assembler)
                    debug_assert!(op_idx < OP_GROUP_SIZE - 1, "invalid op index");
                    self.decoder.execute_user_op(Operation::Noop, op_idx + 1);
                    self.execute_op(Operation::Noop, program, host)?;
                }

                // then, move to the next group and reset operation index
                group_idx = next_group_idx;
                next_group_idx += 1;
                op_idx = 0;

                // if we haven't reached the end of the batch yet, set up the decoder for
                // decoding the next operation group
                if group_idx < num_batch_groups {
                    self.decoder.start_op_group(batch.groups()[group_idx]);
                }
            } else {
                // if we are not at the end of the group, just increment the operation index
                op_idx += 1;
            }
        }

        // make sure we execute the required number of operation groups; this would happen when
        // the actual number of operation groups was not a power of two
        for group_idx in group_idx..num_batch_groups {
            self.decoder.execute_user_op(Operation::Noop, 0);
            self.execute_op(Operation::Noop, program, host)?;

            // if we are not at the last group yet, set up the decoder for decoding the next
            // operation group. the groups we are processing are just NOOPs - so, the op group
            // value is ZERO
            if group_idx < num_batch_groups - 1 {
                self.decoder.start_op_group(ZERO);
            }
        }

        Ok(())
    }

    /// Executes the specified decorator.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl Host,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.decoder.in_debug_mode() {
                    host.on_debug(self.into(), options)?;
                }
            },
            Decorator::AsmOp(assembly_op) => {
                if self.decoder.in_debug_mode() {
                    self.decoder.append_asmop(self.system.clk(), assembly_op.clone());
                }
            },
            Decorator::Trace(id) => {
                if self.enable_tracing {
                    host.on_trace(self.into(), *id)?;
                }
            },
        };
        Ok(())
    }

    // PUBLIC ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns a reference to the kernel against which this process was instantiated.
    pub const fn kernel(&self) -> &Kernel {
        self.chiplets.kernel_rom.kernel()
    }

    /// Consumes this process and returns its constituent components.
    pub fn into_parts(self) -> (System, Decoder, Stack, RangeChecker, Chiplets) {
        (self.system, self.decoder, self.stack, self.range, self.chiplets)
    }
}

// PROCESS STATE
// ================================================================================================

#[derive(Debug, Clone, Copy)]
pub struct SlowProcessState<'a> {
    system: &'a System,
    stack: &'a Stack,
    chiplets: &'a Chiplets,
}

#[derive(Debug, Clone, Copy)]
pub struct FastProcessState<'a> {
    processor: &'a FastProcessor,
    /// the index of the operation in its basic block
    op_idx: usize,
}

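/// A read-only view into the state of a process at the current clock cycle, as passed to [Host]
/// callbacks (e.g., when handling debug and trace decorators).
///
/// The two variants correspond to the two execution engines: [Process] (the trace-generating
/// processor) and [fast::FastProcessor].
///
/// For example, a [Host] callback receiving `state: ProcessState` might inspect the VM as
/// follows (a sketch):
///
/// ```ignore
/// let clk = state.clk();
/// let top = state.get_stack_item(0);
/// let mem = state.get_mem_value(state.ctx(), 0);
/// ```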
#[derive(Debug, Clone, Copy)]
pub enum ProcessState<'a> {
    Slow(SlowProcessState<'a>),
    Fast(FastProcessState<'a>),
}

impl<'a> ProcessState<'a> {
    pub fn new_fast(processor: &'a FastProcessor, op_idx: usize) -> Self {
        Self::Fast(FastProcessState { processor, op_idx })
    }

    /// Returns the current clock cycle of a process.
    #[inline(always)]
    pub fn clk(&self) -> RowIndex {
        match self {
            ProcessState::Slow(state) => state.system.clk(),
            ProcessState::Fast(state) => state.processor.clk + state.op_idx,
        }
    }

    /// Returns the current execution context ID.
    #[inline(always)]
    pub fn ctx(&self) -> ContextId {
        match self {
            ProcessState::Slow(state) => state.system.ctx(),
            ProcessState::Fast(state) => state.processor.ctx,
        }
    }

    /// Returns the current value of the free memory pointer.
    #[inline(always)]
    pub fn fmp(&self) -> u64 {
        match self {
            ProcessState::Slow(state) => state.system.fmp().as_int(),
            ProcessState::Fast(state) => state.processor.fmp.as_int(),
        }
    }

    /// Returns the value located at the specified position on the stack at the current clock
    /// cycle.
    #[inline(always)]
    pub fn get_stack_item(&self, pos: usize) -> Felt {
        match self {
            ProcessState::Slow(state) => state.stack.get(pos),
            ProcessState::Fast(state) => state.processor.stack_get(pos),
        }
    }

    /// Returns a word located at the specified word index on the stack.
    ///
    /// Specifically, word 0 is defined by the first 4 elements of the stack, word 1 is defined
    /// by the next 4 elements, etc. Since the top of the stack contains 4 words, the highest
    /// valid word index is 3.
    ///
    /// The words are created in reverse order. For example, for word 0 the top element of the
    /// stack will be at the last position in the word.
    ///
    /// Creating a word does not change the state of the stack.
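    ///
    /// For example, if the top four stack elements are `[a, b, c, d]` (with `a` on top), then
    /// `get_stack_word(0)` returns the word `[d, c, b, a]`.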
    #[inline(always)]
    pub fn get_stack_word(&self, word_idx: usize) -> Word {
        match self {
            ProcessState::Slow(state) => state.stack.get_word(word_idx),
            ProcessState::Fast(state) => state.processor.stack_get_word(word_idx * WORD_SIZE),
        }
    }

    /// Returns the stack state at the current clock cycle. This includes the top 16 items of
    /// the stack + overflow entries.
    #[inline(always)]
    pub fn get_stack_state(&self) -> Vec<Felt> {
        match self {
            ProcessState::Slow(state) => state.stack.get_state_at(state.system.clk()),
            ProcessState::Fast(state) => state.processor.stack().iter().rev().copied().collect(),
        }
    }

    /// Returns the element located at the specified context/address, or None if the address hasn't
    /// been accessed previously.
    #[inline(always)]
    pub fn get_mem_value(&self, ctx: ContextId, addr: u32) -> Option<Felt> {
        match self {
            ProcessState::Slow(state) => state.chiplets.memory.get_value(ctx, addr),
            ProcessState::Fast(state) => state.processor.memory.read_element_impl(ctx, addr),
        }
    }

    /// Returns the word (4 elements) starting at the specified context/address.
    ///
    /// # Errors
    /// - If the address is not word aligned.
    #[inline(always)]
    pub fn get_mem_word(&self, ctx: ContextId, addr: u32) -> Result<Option<Word>, ExecutionError> {
        match self {
            ProcessState::Slow(state) => {
                state.chiplets.memory.get_word(ctx, addr).map_err(ExecutionError::MemoryError)
            },
            ProcessState::Fast(state) => {
                Ok(state.processor.memory.read_word_impl(ctx, addr, None)?.copied())
            },
        }
    }

    /// Returns the entire memory state for the specified execution context at the current clock
    /// cycle.
    ///
    /// The state is returned as a vector of (address, value) tuples, and includes addresses which
    /// have been accessed at least once.
    #[inline(always)]
    pub fn get_mem_state(&self, ctx: ContextId) -> Vec<(MemoryAddress, Felt)> {
        match self {
            ProcessState::Slow(state) => {
                state.chiplets.memory.get_state_at(ctx, state.system.clk())
            },
            ProcessState::Fast(state) => state.processor.memory.get_memory_state(ctx),
        }
    }
}

// CONVERSIONS
// ================================================================================================

impl<'a> From<&'a Process> for ProcessState<'a> {
    fn from(process: &'a Process) -> Self {
        Self::Slow(SlowProcessState {
            system: &process.system,
            stack: &process.stack,
            chiplets: &process.chiplets,
        })
    }
}

impl<'a> From<&'a mut Process> for ProcessState<'a> {
    fn from(process: &'a mut Process) -> Self {
        Self::Slow(SlowProcessState {
            system: &process.system,
            stack: &process.stack,
            chiplets: &process.chiplets,
        })
    }
}

// HELPERS
// ================================================================================================

/// For errors generated from processing an `ExternalNode`, returns the same error except with
/// proper error context.
fn add_error_ctx_to_external_error(
    result: Result<(), ExecutionError>,
    err_ctx: ErrorContext<impl MastNodeExt>,
) -> Result<(), ExecutionError> {
    match result {
        Ok(_) => Ok(()),
        // Add context information to any errors coming from executing an `ExternalNode`
        Err(err) => match err {
            ExecutionError::NoMastForestWithProcedure { label, source_file: _, root_digest }
            | ExecutionError::MalformedMastForestInHost { label, source_file: _, root_digest } => {
                if label == SourceSpan::UNKNOWN {
                    let err_with_ctx =
                        ExecutionError::no_mast_forest_with_procedure(root_digest, &err_ctx);
                    Err(err_with_ctx)
                } else {
                    // If the source span was already populated, just return the error as-is. This
                    // would occur when a call deeper down the call stack was responsible for the
                    // error.
                    Err(err)
                }
            },

            _ => {
                // propagate all other errors unchanged
                Err(err)
            },
        },
    }
}