miden_processor/fast/mod.rs

#[cfg(test)]
use alloc::rc::Rc;
use alloc::{boxed::Box, sync::Arc, vec::Vec};
#[cfg(test)]
use core::cell::Cell;
use core::{cmp::min, ops::ControlFlow};

use miden_air::{
    Felt,
    trace::{RowIndex, chiplets::hasher::STATE_WIDTH},
};
use miden_core::{
    EMPTY_WORD, WORD_SIZE, Word, ZERO,
    crypto::merkle::MerklePath,
    mast::{MastForest, MastNodeExt, MastNodeId},
    operations::Decorator,
    precompile::PrecompileTranscript,
    program::{Kernel, MIN_STACK_DEPTH, Program, StackInputs, StackOutputs},
    utils::range,
};
use tracing::instrument;

use crate::{
    AdviceInputs, AdviceProvider, ContextId, ExecutionError, ExecutionOptions, Host,
    ProcessorState, Stopper,
    continuation_stack::{Continuation, ContinuationStack},
    errors::{MapExecErr, MapExecErrNoCtx, OperationError},
    execution::{
        InternalBreakReason, execute_impl, finish_emit_op_execution,
        finish_load_mast_forest_from_dyn_start, finish_load_mast_forest_from_external,
    },
    trace::{
        chiplets::CircuitEvaluation,
        execution_tracer::{ExecutionTracer, TraceGenerationContext},
    },
    tracer::{OperationHelperRegisters, Tracer},
};

mod basic_block;
mod call_and_dyn;
mod external;
mod memory;
mod operation;
mod step;

pub use basic_block::SystemEventError;
use external::maybe_use_caller_error_context;
pub use memory::Memory;
pub use step::{BreakReason, ResumeContext};
use step::{NeverStopper, StepStopper};

#[cfg(test)]
mod tests;

// CONSTANTS
// ================================================================================================

/// The size of the stack buffer.
///
/// Note: This value is much larger than it needs to be for the majority of programs. However, some
/// existing programs need it, so we're forced to push it up (though this should be double-checked).
/// At this high a value, we're starting to see some performance degradation on benchmarks. For
/// example, the blake3 benchmark went from 285 MHz to 250 MHz (~10% degradation). Perhaps a better
/// solution would be to make this value much smaller (~1000), and then fall back to a `Vec` if the
/// stack overflows.
const STACK_BUFFER_SIZE: usize = 6850;

/// The initial position of the top of the stack in the stack buffer.
///
/// We place this value close to 0 because if a program hits the limit, it is much more likely to
/// hit the upper bound than the lower bound: hitting the lower bound only occurs when a program
/// drops ZEROs that were generated automatically to keep the stack depth at 16. In practice, if
/// this occurs, it is most likely a bug.
const INITIAL_STACK_TOP_IDX: usize = 250;
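
// Illustrative headroom implied by the two constants above: starting from
// `INITIAL_STACK_TOP_IDX`, the stack top can move up by
// `STACK_BUFFER_SIZE - INITIAL_STACK_TOP_IDX` (= 6600) slots before reaching the top of the
// buffer, and down by `INITIAL_STACK_TOP_IDX - MIN_STACK_DEPTH` (= 234) slots before
// `decrement_stack_size()` must re-center it via `reset_stack_in_buffer()`.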

// FAST PROCESSOR
// ================================================================================================

/// A fast processor which doesn't generate any trace.
///
/// This processor is designed to be as fast as possible. Hence, it only keeps track of the current
/// state of the processor (i.e. the stack, current clock cycle, current memory context, and free
/// memory pointer).
///
/// # Stack Management
/// A few key points about how the stack was designed for maximum performance:
///
/// - The stack has a fixed buffer size defined by `STACK_BUFFER_SIZE`.
///     - This was observed to increase performance by at least 2x compared to using a `Vec` with
///       `push()` & `pop()`.
///     - We track the stack top and bottom using indices `stack_top_idx` and `stack_bot_idx`,
///       respectively.
/// - Since we are using a fixed-size buffer, we need to ensure that stack buffer accesses are not
///   out of bounds. Naively, we could check for this on every access. However, every operation
///   alters the stack depth by a predetermined amount, allowing us to precisely determine the
///   minimum number of operations required to reach a stack buffer boundary, whether at the top or
///   bottom.
///     - For example, if the stack top is 10 elements away from the top boundary, and the stack
///       bottom is 15 elements away from the bottom boundary, then we can safely execute 10
///       operations that modify the stack depth with no bounds check.
/// - When switching contexts (e.g., during a call or syscall), all elements past the first 16 are
///   stored in an `ExecutionContextInfo` struct, and the stack is truncated to 16 elements. The
///   stored elements are restored when returning from the call or syscall.
///
/// # Clock Cycle Management
/// - The clock cycle (`clk`) is managed in the same way as in `Process`. That is, it is incremented
///   by 1 for every row that `Process` would add to the main trace.
///     - It is important to do so because the clock cycle is used to determine the context ID for
///       new execution contexts when using `call` or `dyncall`.
#[derive(Debug)]
pub struct FastProcessor {
    /// The stack is stored in reverse order, so that the last element is at the top of the stack.
    stack: Box<[Felt; STACK_BUFFER_SIZE]>,
    /// The index of the top of the stack.
    stack_top_idx: usize,
    /// The index of the bottom of the stack.
    stack_bot_idx: usize,

    /// The current clock cycle.
    clk: RowIndex,

    /// The current context ID.
    ctx: ContextId,

    /// The hash of the function that called into the current context, or `[ZERO, ZERO, ZERO,
    /// ZERO]` if we are in the first context (i.e. when `call_stack` is empty).
    caller_hash: Word,

    /// The advice provider to be used during execution.
    advice: AdviceProvider,

    /// A map from (context_id, word_address) to the word stored starting at that memory location.
    memory: Memory,

    /// The call stack is used when starting a new execution context (from a `call`, `syscall` or
    /// `dyncall`) to keep track of the information needed to return to the previous context upon
    /// return. It is a stack since calls can be nested.
    call_stack: Vec<ExecutionContextInfo>,

    /// Options for execution, including but not limited to whether debug or tracing is enabled,
    /// the size of core trace fragments during execution, etc.
    options: ExecutionOptions,

    /// Transcript used to record commitments via the `log_precompile` instruction (implemented
    /// via a Poseidon2 sponge).
    pc_transcript: PrecompileTranscript,

    /// Tracks decorator retrieval calls for testing.
    #[cfg(test)]
    pub decorator_retrieval_count: Rc<Cell<usize>>,
}

impl FastProcessor {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------

    /// Creates a new `FastProcessor` instance with the given stack inputs.
    ///
    /// By default, advice inputs are empty and execution options use their defaults
    /// (debugging and tracing disabled).
    ///
    /// # Example
    /// ```ignore
    /// use miden_processor::FastProcessor;
    ///
    /// let processor = FastProcessor::new(stack_inputs)
    ///     .with_advice(advice_inputs)
    ///     .with_debugging(true)
    ///     .with_tracing(true);
    /// ```
    pub fn new(stack_inputs: StackInputs) -> Self {
        Self::new_with_options(stack_inputs, AdviceInputs::default(), ExecutionOptions::default())
    }

    /// Sets the advice inputs for the processor.
    pub fn with_advice(mut self, advice_inputs: AdviceInputs) -> Self {
        self.advice = advice_inputs.into();
        self
    }

    /// Sets the execution options for the processor.
    ///
    /// This will override any previously set debugging or tracing settings.
    pub fn with_options(mut self, options: ExecutionOptions) -> Self {
        self.options = options;
        self
    }

    /// Enables or disables debugging mode.
    ///
    /// When debugging is enabled, debug decorators will be executed during program execution.
    pub fn with_debugging(mut self, enabled: bool) -> Self {
        self.options = self.options.with_debugging(enabled);
        self
    }

    /// Enables or disables tracing mode.
    ///
    /// When tracing is enabled, trace decorators will be executed during program execution.
    pub fn with_tracing(mut self, enabled: bool) -> Self {
        self.options = self.options.with_tracing(enabled);
        self
    }

    /// Constructor for creating a `FastProcessor` with all options specified at once.
    ///
    /// For a more fluent API, consider using `FastProcessor::new()` with builder methods.
    pub fn new_with_options(
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
        options: ExecutionOptions,
    ) -> Self {
        let stack_top_idx = INITIAL_STACK_TOP_IDX;
        let stack = {
            // Note: we use `Vec::into_boxed_slice()` here, since `Box::new([T; N])` first allocates
            // the array on the stack, and then moves it to the heap. This might cause a
            // stack overflow on some systems.
            let mut stack: Box<[Felt; STACK_BUFFER_SIZE]> =
                vec![ZERO; STACK_BUFFER_SIZE].into_boxed_slice().try_into().unwrap();

            // Copy inputs in reverse order so the first element ends up at the top of the stack.
            for (i, &input) in stack_inputs.iter().enumerate() {
                stack[stack_top_idx - 1 - i] = input;
            }
            stack
        };

        Self {
            advice: advice_inputs.into(),
            stack,
            stack_top_idx,
            stack_bot_idx: stack_top_idx - MIN_STACK_DEPTH,
            clk: 0_u32.into(),
            ctx: 0_u32.into(),
            caller_hash: EMPTY_WORD,
            memory: Memory::new(),
            call_stack: Vec::new(),
            options,
            pc_transcript: PrecompileTranscript::new(),
            #[cfg(test)]
            decorator_retrieval_count: Rc::new(Cell::new(0)),
        }
    }

    /// Returns the resume context to be used with the first call to `step()`.
    pub fn get_initial_resume_context(
        &mut self,
        program: &Program,
    ) -> Result<ResumeContext, ExecutionError> {
        self.advice
            .extend_map(program.mast_forest().advice_map())
            .map_exec_err_no_ctx()?;

        Ok(ResumeContext {
            current_forest: program.mast_forest().clone(),
            continuation_stack: ContinuationStack::new(program),
            kernel: program.kernel().clone(),
        })
    }

    // ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns whether the processor is executing in debug mode.
    #[inline(always)]
    pub fn in_debug_mode(&self) -> bool {
        self.options.enable_debugging()
    }

    /// Returns true if decorators should be executed.
    ///
    /// This corresponds to either being in debug mode (for debug decorators) or having tracing
    /// enabled (for trace decorators).
    #[inline(always)]
    fn should_execute_decorators(&self) -> bool {
        self.in_debug_mode() || self.options.enable_tracing()
    }

    #[cfg(test)]
    #[inline(always)]
    fn record_decorator_retrieval(&self) {
        self.decorator_retrieval_count.set(self.decorator_retrieval_count.get() + 1);
    }

    /// Returns the size of the stack.
    #[inline(always)]
    fn stack_size(&self) -> usize {
        self.stack_top_idx - self.stack_bot_idx
    }

    /// Returns the stack, such that the top of the stack is at the last index of the returned
    /// slice.
    pub fn stack(&self) -> &[Felt] {
        &self.stack[self.stack_bot_idx..self.stack_top_idx]
    }

    /// Returns the top 16 elements of the stack.
    pub fn stack_top(&self) -> &[Felt] {
        &self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns a mutable reference to the top 16 elements of the stack.
    pub fn stack_top_mut(&mut self) -> &mut [Felt] {
        &mut self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns the element on the stack at index `idx`.
    #[inline(always)]
    pub fn stack_get(&self, idx: usize) -> Felt {
        self.stack[self.stack_top_idx - idx - 1]
    }

    /// Mutable variant of `stack_get()`.
    #[inline(always)]
    pub fn stack_get_mut(&mut self, idx: usize) -> &mut Felt {
        &mut self.stack[self.stack_top_idx - idx - 1]
    }

    /// Returns the word on the stack starting at index `start_idx` in "stack order".
    ///
    /// For `start_idx = 0`, the top element of the stack will be at position 0 in the word.
    ///
    /// For example, if the stack looks like this:
    ///
    /// ```text
    /// top                                                       bottom
    ///  v                                                           v
    ///  a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p
    /// ```
    ///
    /// Then
    /// - `stack_get_word(0)` returns `[a, b, c, d]`,
    /// - `stack_get_word(1)` returns `[b, c, d, e]`,
    /// - etc.
    #[inline(always)]
    pub fn stack_get_word(&self, start_idx: usize) -> Word {
        // Ensure we have enough elements to form a complete word.
        debug_assert!(
            start_idx + WORD_SIZE <= self.stack_depth() as usize,
            "Not enough elements on stack to read word starting at index {start_idx}"
        );

        let word_start_idx = self.stack_top_idx - start_idx - WORD_SIZE;
        let mut result: [Felt; WORD_SIZE] =
            self.stack[range(word_start_idx, WORD_SIZE)].try_into().unwrap();
        // Reverse so the top of the stack (idx 0) goes to word[0].
        result.reverse();
        result.into()
    }

    /// Returns the number of elements on the stack in the current context.
    #[inline(always)]
    pub fn stack_depth(&self) -> u32 {
        (self.stack_top_idx - self.stack_bot_idx) as u32
    }

    /// Returns a reference to the processor's memory.
    pub fn memory(&self) -> &Memory {
        &self.memory
    }

    /// Returns a narrowed interface for reading and updating the processor state.
    #[inline(always)]
    pub fn state(&mut self) -> ProcessorState<'_> {
        ProcessorState { processor: self }
    }

    // MUTATORS
    // --------------------------------------------------------------------------------------------

    /// Writes an element to the stack at the given index.
    #[inline(always)]
    pub fn stack_write(&mut self, idx: usize, element: Felt) {
        self.stack[self.stack_top_idx - idx - 1] = element
    }

    /// Writes a word to the stack starting at the given index.
    ///
    /// `word[0]` goes to stack position `start_idx` (the top), `word[1]` to `start_idx + 1`, etc.
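    ///
    /// For example, after `stack_write_word(0, &word)`, `stack_get(0)` returns `word[0]` and
    /// `stack_get(3)` returns `word[3]`.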
    #[inline(always)]
    pub fn stack_write_word(&mut self, start_idx: usize, word: &Word) {
        debug_assert!(start_idx < MIN_STACK_DEPTH);

        let word_start_idx = self.stack_top_idx - start_idx - WORD_SIZE;
        let mut source: [Felt; WORD_SIZE] = (*word).into();
        // Reverse so word[0] ends up at the top of the stack (highest internal index).
        source.reverse();
        self.stack[range(word_start_idx, WORD_SIZE)].copy_from_slice(&source)
    }

    /// Swaps the elements at the given indices on the stack.
    #[inline(always)]
    pub fn stack_swap(&mut self, idx1: usize, idx2: usize) {
        let a = self.stack_get(idx1);
        let b = self.stack_get(idx2);
        self.stack_write(idx1, b);
        self.stack_write(idx2, a);
    }

    // EXECUTE
    // --------------------------------------------------------------------------------------------

    /// Executes the given program and returns the execution output (stack outputs, advice
    /// provider, memory, and final precompile transcript).
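    ///
    /// # Example
    /// A minimal sketch (doctest is ignored since it assumes `stack_inputs`, `program`, and
    /// `host` are already constructed):
    /// ```ignore
    /// let output = FastProcessor::new(stack_inputs).execute(&program, &mut host).await?;
    /// let stack_outputs = output.stack;
    /// ```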
    pub async fn execute(
        self,
        program: &Program,
        host: &mut impl Host,
    ) -> Result<ExecutionOutput, ExecutionError> {
        self.execute_with_tracer(program, host, &mut NoopTracer).await
    }

    /// Executes the given program and returns the execution output along with the context
    /// necessary to build the trace.
    #[instrument(name = "execute_for_trace", skip_all)]
    pub async fn execute_for_trace(
        self,
        program: &Program,
        host: &mut impl Host,
    ) -> Result<(ExecutionOutput, TraceGenerationContext), ExecutionError> {
        let mut tracer = ExecutionTracer::new(self.options.core_trace_fragment_size());
        let execution_output = self.execute_with_tracer(program, host, &mut tracer).await?;

        // Pass the final precompile transcript from the execution output to the trace generation
        // context.
        let context = tracer.into_trace_generation_context(execution_output.final_pc_transcript);

        Ok((execution_output, context))
    }

    /// Executes the given program with the provided tracer and returns the execution output.
    pub async fn execute_with_tracer<T>(
        mut self,
        program: &Program,
        host: &mut impl Host,
        tracer: &mut T,
    ) -> Result<ExecutionOutput, ExecutionError>
    where
        T: Tracer<Processor = Self>,
    {
        let mut continuation_stack = ContinuationStack::new(program);
        let mut current_forest = program.mast_forest().clone();

        // Merge the program's advice map into the advice provider.
        self.advice.extend_map(current_forest.advice_map()).map_exec_err_no_ctx()?;

        match self
            .execute_impl(
                &mut continuation_stack,
                &mut current_forest,
                program.kernel(),
                host,
                tracer,
                &NeverStopper,
            )
            .await
        {
            ControlFlow::Continue(stack_outputs) => Ok(ExecutionOutput {
                stack: stack_outputs,
                advice: self.advice,
                memory: self.memory,
                final_pc_transcript: self.pc_transcript,
            }),
            ControlFlow::Break(break_reason) => match break_reason {
                BreakReason::Err(err) => Err(err),
                BreakReason::Stopped(_) => {
                    unreachable!("Execution never stops prematurely with NeverStopper")
                },
            },
        }
    }

    /// Executes a single clock cycle.
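    ///
    /// Returns `None` when the program has finished executing, or `Some(resume_ctx)` to be passed
    /// to the next call to `step()`.
    ///
    /// # Example
    /// A minimal sketch of a step-by-step driver loop (doctest is ignored since it assumes
    /// `processor`, `program`, and `host` are already constructed):
    /// ```ignore
    /// let mut resume_ctx = processor.get_initial_resume_context(&program)?;
    /// while let Some(next_ctx) = processor.step(&mut host, resume_ctx).await? {
    ///     resume_ctx = next_ctx;
    /// }
    /// ```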
    pub async fn step(
        &mut self,
        host: &mut impl Host,
        resume_ctx: ResumeContext,
    ) -> Result<Option<ResumeContext>, ExecutionError> {
        let ResumeContext {
            mut current_forest,
            mut continuation_stack,
            kernel,
        } = resume_ctx;

        match self
            .execute_impl(
                &mut continuation_stack,
                &mut current_forest,
                &kernel,
                host,
                &mut NoopTracer,
                &StepStopper,
            )
            .await
        {
            ControlFlow::Continue(_) => Ok(None),
            ControlFlow::Break(break_reason) => match break_reason {
                BreakReason::Err(err) => Err(err),
                BreakReason::Stopped(maybe_continuation) => {
                    if let Some(continuation) = maybe_continuation {
                        continuation_stack.push_continuation(continuation);
                    }

                    Ok(Some(ResumeContext {
                        current_forest,
                        continuation_stack,
                        kernel,
                    }))
                },
            },
        }
    }

    /// Executes the given program with the provided tracer and returns the stack outputs.
    ///
    /// This function takes a `&mut self` (compared to `self` for the public execute functions) so
    /// that the processor state may be accessed after execution. It is incorrect to execute a
    /// second program using the same processor. This is mainly meant to be used in tests.
    async fn execute_impl<S, T>(
        &mut self,
        continuation_stack: &mut ContinuationStack,
        current_forest: &mut Arc<MastForest>,
        kernel: &Kernel,
        host: &mut impl Host,
        tracer: &mut T,
        stopper: &S,
    ) -> ControlFlow<BreakReason, StackOutputs>
    where
        S: Stopper<Processor = Self>,
        T: Tracer<Processor = Self>,
    {
        while let ControlFlow::Break(internal_break_reason) =
            execute_impl(self, continuation_stack, current_forest, kernel, host, tracer, stopper)
        {
            match internal_break_reason {
                InternalBreakReason::User(break_reason) => return ControlFlow::Break(break_reason),
                InternalBreakReason::Emit {
                    basic_block_node_id,
                    op_idx,
                    continuation,
                } => {
                    self.op_emit(host, current_forest, basic_block_node_id, op_idx).await?;

                    // Call `finish_emit_op_execution()`, as per the sans-IO contract.
                    finish_emit_op_execution(
                        continuation,
                        self,
                        continuation_stack,
                        current_forest,
                        tracer,
                        stopper,
                    )?;
                },
                InternalBreakReason::LoadMastForestFromDyn { dyn_node_id, callee_hash } => {
                    // Load the MAST forest asynchronously.
                    let (root_id, new_forest) = match self
                        .load_mast_forest(
                            callee_hash,
                            host,
                            |digest| OperationError::DynamicNodeNotFound { digest },
                            current_forest,
                            dyn_node_id,
                        )
                        .await
                    {
                        Ok(result) => result,
                        Err(err) => return ControlFlow::Break(BreakReason::Err(err)),
                    };

                    // Finish loading the MAST forest from the Dyn node, as per the sans-IO
                    // contract.
                    finish_load_mast_forest_from_dyn_start(
                        root_id,
                        new_forest,
                        self,
                        current_forest,
                        continuation_stack,
                        tracer,
                        stopper,
                    )?;
                },
                InternalBreakReason::LoadMastForestFromExternal {
                    external_node_id,
                    procedure_hash,
                } => {
                    // Load the MAST forest asynchronously.
                    let (root_id, new_forest) = match self
                        .load_mast_forest(
                            procedure_hash,
                            host,
                            |root_digest| OperationError::NoMastForestWithProcedure { root_digest },
                            current_forest,
                            external_node_id,
                        )
                        .await
                    {
                        Ok(result) => result,
                        Err(err) => {
                            let maybe_enriched_err = maybe_use_caller_error_context(
                                err,
                                current_forest,
                                continuation_stack,
                                host,
                            );

                            return ControlFlow::Break(BreakReason::Err(maybe_enriched_err));
                        },
                    };

                    // Finish loading the MAST forest from the External node, as per the sans-IO
                    // contract.
                    finish_load_mast_forest_from_external(
                        root_id,
                        new_forest,
                        external_node_id,
                        current_forest,
                        continuation_stack,
                        host,
                        tracer,
                    )?;
                },
            }
        }

        match StackOutputs::new(
            &self.stack[self.stack_bot_idx..self.stack_top_idx]
                .iter()
                .rev()
                .copied()
                .collect::<Vec<_>>(),
        ) {
            Ok(stack_outputs) => ControlFlow::Continue(stack_outputs),
            Err(_) => ControlFlow::Break(BreakReason::Err(ExecutionError::OutputStackOverflow(
                self.stack_top_idx - self.stack_bot_idx - MIN_STACK_DEPTH,
            ))),
        }
    }

    // DECORATOR EXECUTORS
    // --------------------------------------------------------------------------------------------

    /// Executes the decorators that should be executed before entering a node.
    fn execute_before_enter_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl Host,
    ) -> ControlFlow<BreakReason> {
        if !self.should_execute_decorators() {
            return ControlFlow::Continue(());
        }

        #[cfg(test)]
        self.record_decorator_retrieval();

        let node = current_forest.get_node_by_id(node_id).unwrap_or_else(|| {
            panic!("internal error: node id {node_id:?} not found in current forest")
        });

        for &decorator_id in node.before_enter(current_forest) {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        ControlFlow::Continue(())
    }

    /// Executes the decorators that should be executed after exiting a node.
    fn execute_after_exit_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl Host,
    ) -> ControlFlow<BreakReason> {
        if !self.in_debug_mode() {
            return ControlFlow::Continue(());
        }

        #[cfg(test)]
        self.record_decorator_retrieval();

        let node = current_forest.get_node_by_id(node_id).unwrap_or_else(|| {
            panic!("internal error: node id {node_id:?} not found in current forest")
        });

        for &decorator_id in node.after_exit(current_forest) {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        ControlFlow::Continue(())
    }

    /// Executes the specified decorator.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl Host,
    ) -> ControlFlow<BreakReason> {
        match decorator {
            Decorator::Debug(options) => {
                if self.in_debug_mode() {
                    let process = &mut self.state();
                    if let Err(err) = host.on_debug(process, options) {
                        return ControlFlow::Break(BreakReason::Err(
                            ExecutionError::DebugHandlerError { err },
                        ));
                    }
                }
            },
            Decorator::Trace(id) => {
                if self.options.enable_tracing() {
                    let process = &mut self.state();
                    if let Err(err) = host.on_trace(process, *id) {
                        return ControlFlow::Break(BreakReason::Err(
                            ExecutionError::TraceHandlerError { trace_id: *id, err },
                        ));
                    }
                }
            },
        };
        ControlFlow::Continue(())
    }

    // HELPERS
    // --------------------------------------------------------------------------------------------

    async fn load_mast_forest(
        &mut self,
        node_digest: Word,
        host: &mut impl Host,
        get_mast_forest_failed: impl Fn(Word) -> OperationError,
        current_forest: &MastForest,
        node_id: MastNodeId,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError> {
        let mast_forest = host
            .get_mast_forest(&node_digest)
            .await
            .ok_or_else(|| get_mast_forest_failed(node_digest))
            .map_exec_err(current_forest, node_id, host)?;

        // We limit the parts of the program that can be called externally to procedure
        // roots, even though MAST doesn't have that restriction.
        let root_id = mast_forest.find_procedure_root(node_digest).ok_or_else(|| {
            Err::<(), _>(OperationError::MalformedMastForestInHost { root_digest: node_digest })
                .map_exec_err(current_forest, node_id, host)
                .unwrap_err()
        })?;

        // Merge the advice map of this forest into the advice provider.
        // Note that the map may be merged multiple times if a different procedure from the same
        // forest is called.
        // For now, only compiled libraries contain non-empty advice maps, so for most cases,
        // this call will be cheap.
        self.advice.extend_map(mast_forest.advice_map()).map_exec_err(
            current_forest,
            node_id,
            host,
        )?;

        Ok((root_id, mast_forest))
    }

    /// Increments the stack top pointer by 1.
    ///
    /// The bottom of the stack is never affected by this operation.
    #[inline(always)]
    fn increment_stack_size(&mut self) {
        self.stack_top_idx += 1;
    }

    /// Decrements the stack top pointer by 1.
    ///
    /// The bottom of the stack is only decremented in cases where the stack depth would otherwise
    /// become less than 16.
    #[inline(always)]
    fn decrement_stack_size(&mut self) {
        if self.stack_top_idx == MIN_STACK_DEPTH {
            // We no longer have any room in the stack buffer to decrement the stack size (which
            // would cause the `stack_bot_idx` to go below 0). We therefore reset the stack to its
            // original position.
            self.reset_stack_in_buffer(INITIAL_STACK_TOP_IDX);
        }

        self.stack_top_idx -= 1;
        self.stack_bot_idx = min(self.stack_bot_idx, self.stack_top_idx - MIN_STACK_DEPTH);
    }

    /// Resets the stack in the buffer to a new position, preserving the top 16 elements of the
    /// stack.
    ///
    /// # Preconditions
    /// - The stack is expected to have exactly 16 elements.
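    ///
    /// For example, with `new_stack_top_idx = 250`, the 16 live elements are moved to indices
    /// `234..250` and indices `0..234` are zeroed out.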
    #[inline(always)]
    fn reset_stack_in_buffer(&mut self, new_stack_top_idx: usize) {
        debug_assert_eq!(self.stack_depth(), MIN_STACK_DEPTH as u32);

        let new_stack_bot_idx = new_stack_top_idx - MIN_STACK_DEPTH;

        // Copy the stack to its new position.
        self.stack
            .copy_within(self.stack_bot_idx..self.stack_top_idx, new_stack_bot_idx);

        // Zero out the stack below `new_stack_bot_idx`, since this is where overflow values come
        // from, and they are guaranteed to be ZERO. We don't need to zero out above
        // `stack_top_idx`, since values there are never read before being written.
        self.stack[0..new_stack_bot_idx].fill(ZERO);

        // Update indices.
        self.stack_bot_idx = new_stack_bot_idx;
        self.stack_top_idx = new_stack_top_idx;
    }

    // SYNC WRAPPERS
    // --------------------------------------------------------------------------------------------

    /// Convenience sync wrapper around [Self::step].
    pub fn step_sync(
        &mut self,
        host: &mut impl Host,
        resume_ctx: ResumeContext,
    ) -> Result<Option<ResumeContext>, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution.
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        let execution_output = rt.block_on(self.step(host, resume_ctx))?;

        Ok(execution_output)
    }

    /// Executes the given program step by step (calling [`Self::step`] repeatedly) and returns the
    /// stack outputs.
    pub fn execute_by_step_sync(
        mut self,
        program: &Program,
        host: &mut impl Host,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution.
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();
        let mut current_resume_ctx = self.get_initial_resume_context(program)?;

        rt.block_on(async {
            loop {
                match self.step(host, current_resume_ctx).await {
                    Ok(maybe_resume_ctx) => match maybe_resume_ctx {
                        Some(next_resume_ctx) => {
                            current_resume_ctx = next_resume_ctx;
                        },
                        None => {
                            // The end of the program was reached.
                            break Ok(StackOutputs::new(
                                &self.stack[self.stack_bot_idx..self.stack_top_idx]
                                    .iter()
                                    .rev()
                                    .copied()
                                    .collect::<Vec<_>>(),
                            )
                            .unwrap());
                        },
                    },
                    Err(err) => {
                        break Err(err);
                    },
                }
            }
        })
    }

    /// Convenience sync wrapper around [Self::execute].
    ///
    /// This method is only available on non-wasm32 targets. On wasm32, use the
    /// async `execute()` method directly since wasm32 runs in the browser's event loop.
    ///
    /// # Panics
    /// Panics if called from within an existing Tokio runtime. Use the async `execute()`
    /// method instead in async contexts.
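    ///
    /// # Example
    /// A minimal sketch (doctest is ignored since it assumes `stack_inputs`, `program`, and
    /// `host` are already constructed):
    /// ```ignore
    /// let output = FastProcessor::new(stack_inputs).execute_sync(&program, &mut host)?;
    /// ```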
    #[cfg(not(target_arch = "wasm32"))]
    pub fn execute_sync(
        self,
        program: &Program,
        host: &mut impl Host,
    ) -> Result<ExecutionOutput, ExecutionError> {
        match tokio::runtime::Handle::try_current() {
            Ok(_handle) => {
                // We're already inside a Tokio runtime - this is not supported
                // because we cannot safely create a nested runtime or move the
                // non-Send host reference to another thread.
                panic!(
                    "Cannot call execute_sync from within a Tokio runtime. \
                     Use the async execute() method instead."
                )
            },
            Err(_) => {
                // No runtime exists - create one and use it.
                let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();
                rt.block_on(self.execute(program, host))
            },
        }
    }

    /// Convenience sync wrapper around [Self::execute_for_trace].
    ///
    /// This method is only available on non-wasm32 targets. On wasm32, use the
    /// async `execute_for_trace()` method directly since wasm32 runs in the browser's event loop.
    ///
    /// # Panics
    /// Panics if called from within an existing Tokio runtime. Use the async `execute_for_trace()`
    /// method instead in async contexts.
    #[cfg(not(target_arch = "wasm32"))]
    #[instrument(name = "execute_for_trace_sync", skip_all)]
    pub fn execute_for_trace_sync(
        self,
        program: &Program,
        host: &mut impl Host,
    ) -> Result<(ExecutionOutput, TraceGenerationContext), ExecutionError> {
        match tokio::runtime::Handle::try_current() {
            Ok(_handle) => {
                // We're already inside a Tokio runtime - this is not supported
                // because we cannot safely create a nested runtime or move the
                // non-Send host reference to another thread.
                panic!(
                    "Cannot call execute_for_trace_sync from within a Tokio runtime. \
                     Use the async execute_for_trace() method instead."
                )
            },
            Err(_) => {
                // No runtime exists - create one and use it.
                let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();
                rt.block_on(self.execute_for_trace(program, host))
            },
        }
    }

    /// Similar to [Self::execute_sync], but allows mutable access to the processor.
    ///
    /// This method is only available on non-wasm32 targets for testing. On wasm32, use
    /// async execution methods directly since wasm32 runs in the browser's event loop.
    ///
    /// # Panics
    /// Panics if called from within an existing Tokio runtime. Use async execution
    /// methods instead in async contexts.
    #[cfg(all(any(test, feature = "testing"), not(target_arch = "wasm32")))]
    pub fn execute_sync_mut(
        &mut self,
        program: &Program,
        host: &mut impl Host,
    ) -> Result<StackOutputs, ExecutionError> {
        let mut continuation_stack = ContinuationStack::new(program);
        let mut current_forest = program.mast_forest().clone();

        // Merge the program's advice map into the advice provider.
        self.advice.extend_map(current_forest.advice_map()).map_exec_err_no_ctx()?;

        let execute_fut = async {
            match self
                .execute_impl(
                    &mut continuation_stack,
                    &mut current_forest,
                    program.kernel(),
                    host,
                    &mut NoopTracer,
                    &NeverStopper,
                )
                .await
            {
                ControlFlow::Continue(stack_outputs) => Ok(stack_outputs),
                ControlFlow::Break(break_reason) => match break_reason {
                    BreakReason::Err(err) => Err(err),
                    BreakReason::Stopped(_) => {
                        unreachable!("Execution never stops prematurely with NeverStopper")
                    },
                },
            }
        };

        match tokio::runtime::Handle::try_current() {
            Ok(_handle) => {
                // We're already inside a Tokio runtime - this is not supported
                // because we cannot safely create a nested runtime or move the
                // non-Send host reference to another thread.
                panic!(
                    "Cannot call execute_sync_mut from within a Tokio runtime. \
                     Use async execution methods instead."
                )
            },
            Err(_) => {
                // No runtime exists - create one and use it.
                let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();
                rt.block_on(execute_fut)
            },
        }
    }
}

// EXECUTION OUTPUT
// ================================================================================================

/// The output of a program execution, containing the state of the stack, advice provider,
/// memory, and final precompile transcript at the end of execution.
#[derive(Debug)]
pub struct ExecutionOutput {
    pub stack: StackOutputs,
    pub advice: AdviceProvider,
    pub memory: Memory,
    pub final_pc_transcript: PrecompileTranscript,
}

// EXECUTION CONTEXT INFO
// ================================================================================================

/// Information about an execution context.
///
/// This struct is used to keep track of the information needed to return to the previous context
/// upon return from a `call`, `syscall` or `dyncall`.
#[derive(Debug)]
struct ExecutionContextInfo {
    /// This stores all the elements on the stack at the call site, excluding the top 16 elements.
    /// This corresponds to the overflow table in [crate::Process].
    overflow_stack: Vec<Felt>,
    ctx: ContextId,
    fn_hash: Word,
}

// NOOP TRACER
// ================================================================================================

/// A [Tracer] that does nothing.
pub struct NoopTracer;

impl Tracer for NoopTracer {
    type Processor = FastProcessor;

    #[inline(always)]
    fn start_clock_cycle(
        &mut self,
        _processor: &FastProcessor,
        _continuation: Continuation,
        _continuation_stack: &ContinuationStack,
        _current_forest: &Arc<MastForest>,
    ) {
        // do nothing
    }

    #[inline(always)]
    fn record_mast_forest_resolution(&mut self, _node_id: MastNodeId, _forest: &Arc<MastForest>) {
        // do nothing
    }

    #[inline(always)]
    fn record_hasher_permute(
        &mut self,
        _input_state: [Felt; STATE_WIDTH],
        _output_state: [Felt; STATE_WIDTH],
    ) {
        // do nothing
    }

    #[inline(always)]
    fn record_hasher_build_merkle_root(
        &mut self,
        _node: Word,
        _path: Option<&MerklePath>,
        _index: Felt,
        _output_root: Word,
    ) {
        // do nothing
    }

    #[inline(always)]
    fn record_hasher_update_merkle_root(
        &mut self,
        _old_node: Word,
        _new_node: Word,
        _path: Option<&MerklePath>,
        _index: Felt,
        _old_root: Word,
        _new_root: Word,
    ) {
        // do nothing
    }

    #[inline(always)]
    fn record_memory_read_element(
        &mut self,
        _element: Felt,
        _addr: Felt,
        _ctx: ContextId,
        _clk: RowIndex,
    ) {
        // do nothing
    }

    #[inline(always)]
    fn record_memory_read_word(
        &mut self,
        _word: Word,
        _addr: Felt,
        _ctx: ContextId,
        _clk: RowIndex,
    ) {
        // do nothing
    }

    #[inline(always)]
    fn record_memory_write_element(
        &mut self,
        _element: Felt,
        _addr: Felt,
        _ctx: ContextId,
        _clk: RowIndex,
    ) {
        // do nothing
    }

    #[inline(always)]
    fn record_memory_write_word(
        &mut self,
        _word: Word,
        _addr: Felt,
        _ctx: ContextId,
        _clk: RowIndex,
    ) {
        // do nothing
    }

    #[inline(always)]
    fn record_advice_pop_stack(&mut self, _value: Felt) {
        // do nothing
    }

    #[inline(always)]
    fn record_advice_pop_stack_word(&mut self, _word: Word) {
        // do nothing
    }

    #[inline(always)]
    fn record_advice_pop_stack_dword(&mut self, _words: [Word; 2]) {
        // do nothing
    }

    #[inline(always)]
    fn record_u32and(&mut self, _a: Felt, _b: Felt) {
        // do nothing
    }

    #[inline(always)]
    fn record_u32xor(&mut self, _a: Felt, _b: Felt) {
        // do nothing
    }

    #[inline(always)]
    fn record_u32_range_checks(&mut self, _clk: RowIndex, _u32_lo: Felt, _u32_hi: Felt) {
        // do nothing
    }

    #[inline(always)]
    fn record_kernel_proc_access(&mut self, _proc_hash: Word) {
        // do nothing
    }

    #[inline(always)]
    fn record_circuit_evaluation(&mut self, _circuit_evaluation: CircuitEvaluation) {
        // do nothing
    }

    #[inline(always)]
    fn finalize_clock_cycle(
        &mut self,
        _processor: &FastProcessor,
        _op_helper_registers: OperationHelperRegisters,
        _current_forest: &Arc<MastForest>,
    ) {
        // do nothing
    }

    #[inline(always)]
    fn increment_stack_size(&mut self, _processor: &FastProcessor) {
        // do nothing
    }

    #[inline(always)]
    fn decrement_stack_size(&mut self) {
        // do nothing
    }

    #[inline(always)]
    fn start_context(&mut self) {
        // do nothing
    }

    #[inline(always)]
    fn restore_context(&mut self) {
        // do nothing
    }
}