miden_processor/fast/
mod.rs

use alloc::{boxed::Box, sync::Arc, vec::Vec};
use core::cmp::min;

use memory::Memory;
use miden_air::{Felt, RowIndex};
use miden_core::{
    Decorator, EMPTY_WORD, Program, StackOutputs, WORD_SIZE, Word, ZERO,
    mast::{MastForest, MastNode, MastNodeExt, MastNodeId},
    precompile::PrecompileTranscript,
    stack::MIN_STACK_DEPTH,
    utils::range,
};

use crate::{
    AdviceInputs, AdviceProvider, AsyncHost, ContextId, ErrorContext, ExecutionError, ProcessState,
    chiplets::Ace,
    continuation_stack::{Continuation, ContinuationStack},
    fast::execution_tracer::{ExecutionTracer, TraceGenerationContext},
};

pub mod execution_tracer;
mod memory;
mod operation;
pub use operation::eval_circuit_fast_;
pub mod trace_state;
mod tracer;
pub use tracer::{NoopTracer, Tracer};

mod basic_block;
mod call_and_dyn;
mod external;
mod join;
mod r#loop;
mod split;

#[cfg(test)]
mod tests;

/// The size of the stack buffer.
///
/// Note: This value is much larger than it needs to be for the majority of programs. However, some
/// existing programs require it, so we are forced to keep it this high (though this should be
/// double-checked). At this high a value, we're starting to see some performance degradation on
/// benchmarks. For example, the blake3 benchmark went from 285 MHz to 250 MHz (~10% degradation).
/// Perhaps a better solution would be to make this value much smaller (~1000), and then fall back
/// to a `Vec` if the stack overflows.
const STACK_BUFFER_SIZE: usize = 6850;

/// The initial position of the top of the stack in the stack buffer.
///
/// We place this value close to 0 because if a program hits a buffer boundary, it is much more
/// likely to hit the upper bound than the lower bound, since hitting the lower bound only occurs
/// when dropping 0's that were generated automatically to keep the stack depth at 16. In practice,
/// if this occurs, it is most likely a bug.
const INITIAL_STACK_TOP_IDX: usize = 250;

/// A fast processor which doesn't generate any trace.
///
/// This processor is designed to be as fast as possible. Hence, it only keeps track of the current
/// state of the processor (i.e. the stack, current clock cycle, current memory context, and free
/// memory pointer).
///
/// # Stack Management
/// A few key points about how the stack was designed for maximum performance:
///
/// - The stack has a fixed buffer size defined by `STACK_BUFFER_SIZE`.
///     - This was observed to increase performance by at least 2x compared to using a `Vec` with
///       `push()` & `pop()`.
///     - We track the stack top and bottom using indices `stack_top_idx` and `stack_bot_idx`,
///       respectively.
/// - Since we are using a fixed-size buffer, we need to ensure that stack buffer accesses are not
///   out of bounds. Naively, we could check for this on every access. However, every operation
///   alters the stack depth by a predetermined amount, allowing us to precisely determine the
///   minimum number of operations required to reach a stack buffer boundary, whether at the top or
///   bottom (see the sketch after this list).
///     - For example, if the stack top is 10 elements away from the top boundary, and the stack
///       bottom is 15 elements away from the bottom boundary, then we can safely execute 10
///       operations that modify the stack depth with no bounds check.
/// - When switching contexts (e.g., during a call or syscall), all elements past the first 16 are
///   stored in an `ExecutionContextInfo` struct, and the stack is truncated to 16 elements. This
///   state is restored when returning from the call or syscall.
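///
/// A rough sketch of that bounds-check amortization (hypothetical code, not an actual helper in
/// this module; it assumes each operation changes the stack depth by at most one):
///
/// ```ignore
/// // The number of depth-modifying operations that can run without any bounds check is
/// // the distance from the current stack top/bottom to the nearest buffer boundary.
/// let safe_ops = min(STACK_BUFFER_SIZE - stack_top_idx, stack_bot_idx);
/// ```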
///
/// # Clock Cycle Management
/// - The clock cycle (`clk`) is managed in the same way as in `Process`. That is, it is incremented
///   by 1 for every row that `Process` adds to the main trace.
///     - It is important to do so because the clock cycle is used to determine the context ID for
///       new execution contexts when using `call` or `dyncall`.
#[derive(Debug)]
pub struct FastProcessor {
    /// The stack is stored in reverse order, so that the last element is at the top of the stack.
    pub(super) stack: Box<[Felt; STACK_BUFFER_SIZE]>,
    /// The index of the top of the stack.
    stack_top_idx: usize,
    /// The index of the bottom of the stack.
    stack_bot_idx: usize,

    /// The current clock cycle.
    pub(super) clk: RowIndex,

    /// The current context ID.
    pub(super) ctx: ContextId,

    /// The hash of the function that called into the current context, or `[ZERO, ZERO, ZERO,
    /// ZERO]` if we are in the first context (i.e. when `call_stack` is empty).
    pub(super) caller_hash: Word,

    /// The advice provider to be used during execution.
    pub(super) advice: AdviceProvider,

    /// A map from (context_id, word_address) to the word stored starting at that memory location.
    pub(super) memory: Memory,

    /// A map storing metadata per call to the ACE chiplet.
    pub(super) ace: Ace,

    /// The call stack is used when starting a new execution context (from a `call`, `syscall` or
    /// `dyncall`) to keep track of the information needed to return to the previous context upon
    /// return. It is a stack since calls can be nested.
    call_stack: Vec<ExecutionContextInfo>,

    /// Whether to enable debug statements and tracing.
    in_debug_mode: bool,

    /// Transcript used to record commitments via the `log_precompile` instruction (implemented
    /// via an RPO sponge).
    pc_transcript: PrecompileTranscript,
}

impl FastProcessor {
    // CONSTRUCTORS
    // -------------------------------------------------------------------------------------------

    /// Creates a new `FastProcessor` instance with the given stack inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new(stack_inputs: &[Felt]) -> Self {
        Self::initialize(stack_inputs, AdviceInputs::default(), false)
    }

    /// Creates a new `FastProcessor` instance with the given stack and advice inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new_with_advice_inputs(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, false)
    }

    /// Creates a new `FastProcessor` instance, set to debug mode, with the given stack
    /// and advice inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new_debug(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, true)
    }

    /// Generic constructor unifying the above public ones.
    ///
    /// The stack inputs are expected to be stored in reverse order. For example, if `stack_inputs =
    /// [1,2,3]`, then the stack will be initialized as `[3,2,1,0,0,...]`, with `3` being on
    /// top.
    fn initialize(stack_inputs: &[Felt], advice_inputs: AdviceInputs, in_debug_mode: bool) -> Self {
        assert!(stack_inputs.len() <= MIN_STACK_DEPTH);

        let stack_top_idx = INITIAL_STACK_TOP_IDX;
        let stack = {
            // Note: we use `Vec::into_boxed_slice()` here, since `Box::new([T; N])` first allocates
            // the array on the stack, and then moves it to the heap. This might cause a
            // stack overflow on some systems.
            let mut stack: Box<[Felt; STACK_BUFFER_SIZE]> =
                vec![ZERO; STACK_BUFFER_SIZE].into_boxed_slice().try_into().unwrap();
            let bottom_idx = stack_top_idx - stack_inputs.len();

            stack[bottom_idx..stack_top_idx].copy_from_slice(stack_inputs);
            stack
        };

        Self {
            advice: advice_inputs.into(),
            stack,
            stack_top_idx,
            stack_bot_idx: stack_top_idx - MIN_STACK_DEPTH,
            clk: 0_u32.into(),
            ctx: 0_u32.into(),
            caller_hash: EMPTY_WORD,
            memory: Memory::new(),
            call_stack: Vec::new(),
            ace: Ace::default(),
            in_debug_mode,
            pc_transcript: PrecompileTranscript::new(),
        }
    }

    // ACCESSORS
    // -------------------------------------------------------------------------------------------

    /// Returns the size of the stack.
    #[inline(always)]
    fn stack_size(&self) -> usize {
        self.stack_top_idx - self.stack_bot_idx
    }

    /// Returns the stack, such that the top of the stack is at the last index of the returned
    /// slice.
    pub fn stack(&self) -> &[Felt] {
        &self.stack[self.stack_bot_idx..self.stack_top_idx]
    }

    /// Returns the top 16 elements of the stack.
    pub fn stack_top(&self) -> &[Felt] {
        &self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns a mutable reference to the top 16 elements of the stack.
    pub fn stack_top_mut(&mut self) -> &mut [Felt] {
        &mut self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns the element on the stack at index `idx`, where index 0 refers to the top of the
    /// stack.
    #[inline(always)]
    pub fn stack_get(&self, idx: usize) -> Felt {
        self.stack[self.stack_top_idx - idx - 1]
    }

    /// Mutable variant of `stack_get()`.
    #[inline(always)]
    pub fn stack_get_mut(&mut self, idx: usize) -> &mut Felt {
        &mut self.stack[self.stack_top_idx - idx - 1]
    }

    /// Returns the word on the stack starting at index `start_idx` in "stack order".
    ///
    /// That is, for `start_idx=0` the top element of the stack will be at the last position in the
    /// word.
    ///
    /// For example, if the stack looks like this:
    ///
    /// top                                                       bottom
    /// v                                                           v
    /// a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p
    ///
    /// Then
    /// - `stack_get_word(0)` returns `[d, c, b, a]`,
    /// - `stack_get_word(1)` returns `[e, d, c, b]`,
    /// - etc.
    #[inline(always)]
    pub fn stack_get_word(&self, start_idx: usize) -> Word {
        // Ensure we have enough elements to form a complete word
        debug_assert!(
            start_idx + WORD_SIZE <= self.stack_depth() as usize,
            "Not enough elements on stack to read word starting at index {start_idx}"
        );

        let word_start_idx = self.stack_top_idx - start_idx - WORD_SIZE;
        let result: [Felt; WORD_SIZE] =
            self.stack[range(word_start_idx, WORD_SIZE)].try_into().unwrap();
        result.into()
    }

    /// Returns the number of elements on the stack in the current context.
    #[inline(always)]
    pub fn stack_depth(&self) -> u32 {
        (self.stack_top_idx - self.stack_bot_idx) as u32
    }

    // MUTATORS
    // -------------------------------------------------------------------------------------------

    /// Writes an element to the stack at the given index, where index 0 refers to the top of the
    /// stack.
    #[inline(always)]
    pub fn stack_write(&mut self, idx: usize, element: Felt) {
        self.stack[self.stack_top_idx - idx - 1] = element
    }

    /// Writes a word to the stack starting at the given index, in the same "stack order" as
    /// [`Self::stack_get_word`]: the last element of the word is written at stack index
    /// `start_idx`, and the first element at `start_idx + 3`.
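    ///
    /// For example, with the stack layout shown for [`Self::stack_get_word`] (hypothetical
    /// values), `stack_write_word(0, &word)` writes `word` such that its last element becomes the
    /// new top of the stack.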
    #[inline(always)]
    pub fn stack_write_word(&mut self, start_idx: usize, word: &Word) {
        debug_assert!(start_idx < MIN_STACK_DEPTH);

        let word_start_idx = self.stack_top_idx - start_idx - WORD_SIZE;
        let source: [Felt; WORD_SIZE] = (*word).into();
        self.stack[range(word_start_idx, WORD_SIZE)].copy_from_slice(&source)
    }

    /// Swaps the elements at the given indices on the stack.
    #[inline(always)]
    pub fn stack_swap(&mut self, idx1: usize, idx2: usize) {
        let a = self.stack_get(idx1);
        let b = self.stack_get(idx2);
        self.stack_write(idx1, b);
        self.stack_write(idx2, a);
    }

    // EXECUTE
    // -------------------------------------------------------------------------------------------

    /// Executes the given program and returns the execution output (stack outputs, advice
    /// provider, memory, and final precompile transcript).
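    ///
    /// # Example
    /// A minimal calling sketch; the `program`, `host`, and `stack_inputs` values are assumed to
    /// be set up by the embedding application:
    /// ```ignore
    /// let processor = FastProcessor::new(&stack_inputs);
    /// let output = processor.execute(&program, &mut host).await?;
    /// let stack_outputs = output.stack;
    /// ```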
    pub async fn execute(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<ExecutionOutput, ExecutionError> {
        self.execute_with_tracer(program, host, &mut NoopTracer).await
    }

    /// Executes the given program and returns the execution output along with the context
    /// necessary to build the trace.
    pub async fn execute_for_trace(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
        fragment_size: usize,
    ) -> Result<(ExecutionOutput, TraceGenerationContext), ExecutionError> {
        let mut tracer = ExecutionTracer::new(fragment_size);
        let execution_output = self.execute_with_tracer(program, host, &mut tracer).await?;

        // Pass the final precompile transcript from the execution output to the trace generation
        // context.
        let context = tracer.into_trace_generation_context(execution_output.final_pc_transcript);

        Ok((execution_output, context))
    }

    /// Executes the given program with the provided tracer and returns the execution output.
    pub async fn execute_with_tracer(
        mut self,
        program: &Program,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<ExecutionOutput, ExecutionError> {
        let stack_outputs = self.execute_impl(program, host, tracer).await?;

        Ok(ExecutionOutput {
            stack: stack_outputs,
            advice: self.advice,
            memory: self.memory,
            final_pc_transcript: self.pc_transcript,
        })
    }

    /// Executes the given program with the provided tracer and returns the stack outputs.
    ///
    /// This function takes a `&mut self` (compared to `self` for the public execute functions) so
    /// that the processor state may be accessed after execution. It is incorrect to execute a
    /// second program using the same processor. This is mainly meant to be used in tests.
    async fn execute_impl(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<StackOutputs, ExecutionError> {
        let mut continuation_stack = ContinuationStack::new(program);
        let mut current_forest = program.mast_forest().clone();

        // Merge the program's advice map into the advice provider.
        self.advice
            .extend_map(current_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, &()))?;

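        // Drive execution iteratively rather than recursively: `StartNode` continuations descend
        // into a node, while `Finish*` continuations run a node's wrap-up logic once its children
        // have completed. This avoids deep native recursion when traversing nested MAST nodes.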
        while let Some(continuation) = continuation_stack.pop_continuation() {
            match continuation {
                Continuation::StartNode(node_id) => {
                    let node = current_forest.get_node_by_id(node_id).unwrap();

                    match node {
                        MastNode::Block(basic_block_node) => {
                            self.execute_basic_block_node(
                                basic_block_node,
                                node_id,
                                &current_forest,
                                host,
                                &mut continuation_stack,
                                &current_forest,
                                tracer,
                            )
                            .await?
                        },
                        MastNode::Join(join_node) => self.start_join_node(
                            join_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Split(split_node) => self.start_split_node(
                            split_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Loop(loop_node) => self.start_loop_node(
                            loop_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Call(call_node) => self.start_call_node(
                            call_node,
                            node_id,
                            program,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Dyn(_) => {
                            self.start_dyn_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                                tracer,
                            )
                            .await?
                        },
                        MastNode::External(_external_node) => {
                            self.execute_external_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                                tracer,
                            )
                            .await?
                        },
                    }
                },
                Continuation::FinishJoin(node_id) => self.finish_join_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishSplit(node_id) => self.finish_split_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishLoop(node_id) => self.finish_loop_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishCall(node_id) => self.finish_call_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishDyn(node_id) => self.finish_dyn_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishExternal(node_id) => {
                    // Execute after_exit decorators when returning from an external node.
                    // Note: current_forest should already be restored by the EnterForest
                    // continuation.
                    self.execute_after_exit_decorators(node_id, &current_forest, host)?;
                },
                Continuation::EnterForest(previous_forest) => {
                    // Restore the previous forest.
                    current_forest = previous_forest;
                },
            }
        }

        StackOutputs::new(
            self.stack[self.stack_bot_idx..self.stack_top_idx]
                .iter()
                .rev()
                .copied()
                .collect(),
        )
        .map_err(|_| {
            ExecutionError::OutputStackOverflow(
                self.stack_top_idx - self.stack_bot_idx - MIN_STACK_DEPTH,
            )
        })
    }

    // DECORATOR EXECUTORS
    // -------------------------------------------------------------------------------------------

    /// Executes the decorators that should be executed before entering a node.
    fn execute_before_enter_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let node = current_forest.get_node_by_id(node_id).unwrap_or_else(|| {
            panic!("internal error: node id {node_id} not found in current forest")
        });

        for &decorator_id in node.before_enter(current_forest) {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the decorators that should be executed after exiting a node.
    fn execute_after_exit_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let node = current_forest.get_node_by_id(node_id).unwrap_or_else(|| {
            panic!("internal error: node id {node_id} not found in current forest")
        });

        for &decorator_id in node.after_exit(current_forest) {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the specified decorator.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.in_debug_mode {
                    let clk = self.clk;
                    let process = &mut self.state();
                    host.on_debug(process, options)
                        .map_err(|err| ExecutionError::DebugHandlerError { clk, err })?;
                }
            },
            Decorator::AsmOp(_assembly_op) => {
                // do nothing
            },
            Decorator::Trace(id) => {
                let clk = self.clk;
                let process = &mut self.state();
                host.on_trace(process, *id).map_err(|err| ExecutionError::TraceHandlerError {
                    clk,
                    trace_id: *id,
                    err,
                })?;
            },
        };
        Ok(())
    }

    // HELPERS
    // -------------------------------------------------------------------------------------------

    /// Increments the clock by 1.
    #[inline(always)]
    fn increment_clk(&mut self, tracer: &mut impl Tracer) {
        self.clk += 1_u32;

        tracer.increment_clk();
    }

    async fn load_mast_forest<E>(
        &mut self,
        node_digest: Word,
        host: &mut impl AsyncHost,
        get_mast_forest_failed: impl Fn(Word, &E) -> ExecutionError,
        err_ctx: &E,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError>
    where
        E: ErrorContext,
    {
        let mast_forest = host
            .get_mast_forest(&node_digest)
            .await
            .ok_or_else(|| get_mast_forest_failed(node_digest, err_ctx))?;

        // We limit the parts of the program that can be called externally to procedure
        // roots, even though MAST doesn't have that restriction.
        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, err_ctx))?;

        // Merge the advice map of this forest into the advice provider.
        // Note that the map may be merged multiple times if a different procedure from the same
        // forest is called.
        // For now, only compiled libraries contain non-empty advice maps, so for most cases,
        // this call will be cheap.
        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, err_ctx))?;

        Ok((root_id, mast_forest))
    }

    /// Increments the stack top pointer by 1.
    ///
    /// The bottom of the stack is never affected by this operation.
    #[inline(always)]
    fn increment_stack_size(&mut self, tracer: &mut impl Tracer) {
        tracer.increment_stack_size(self);

        self.stack_top_idx += 1;
    }

    /// Decrements the stack top pointer by 1.
    ///
    /// The bottom of the stack is only decremented in cases where the stack depth would otherwise
    /// become less than 16.
    #[inline(always)]
    fn decrement_stack_size(&mut self, tracer: &mut impl Tracer) {
        if self.stack_top_idx == MIN_STACK_DEPTH {
            // We no longer have any room in the stack buffer to decrement the stack size (doing so
            // would cause the `stack_bot_idx` to go below 0). We therefore reset the stack to its
            // original position.
            self.reset_stack_in_buffer(INITIAL_STACK_TOP_IDX);
        }

        self.stack_top_idx -= 1;
        self.stack_bot_idx = min(self.stack_bot_idx, self.stack_top_idx - MIN_STACK_DEPTH);

        tracer.decrement_stack_size();
    }

    /// Resets the stack in the buffer to a new position, preserving the top 16 elements of the
    /// stack.
    ///
    /// # Preconditions
    /// - The stack is expected to have exactly 16 elements.
    #[inline(always)]
    fn reset_stack_in_buffer(&mut self, new_stack_top_idx: usize) {
        debug_assert_eq!(self.stack_depth(), MIN_STACK_DEPTH as u32);

        let new_stack_bot_idx = new_stack_top_idx - MIN_STACK_DEPTH;

        // Copy the stack to its new position.
        self.stack
            .copy_within(self.stack_bot_idx..self.stack_top_idx, new_stack_bot_idx);

        // Zero out the stack below `new_stack_bot_idx`, since this is where overflow values are
        // read from, and they are guaranteed to be ZERO. We don't need to zero out above
        // `stack_top_idx`, since values there are never read before being written.
        self.stack[0..new_stack_bot_idx].fill(ZERO);

        // Update indices.
        self.stack_bot_idx = new_stack_bot_idx;
        self.stack_top_idx = new_stack_top_idx;
    }

    // TESTING
    // -------------------------------------------------------------------------------------------

    /// Convenience sync wrapper to [Self::execute] for testing purposes.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        let execution_output = rt.block_on(self.execute(program, host))?;

        Ok(execution_output.stack)
    }

    /// Convenience sync wrapper to [Self::execute_for_trace] for testing purposes.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_for_trace_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
        fragment_size: usize,
    ) -> Result<(ExecutionOutput, TraceGenerationContext), ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_for_trace(program, host, fragment_size))
    }

    /// Similar to [Self::execute_sync], but allows mutable access to the processor.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync_mut(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_impl(program, host, &mut NoopTracer))
    }
}

// EXECUTION OUTPUT
// ===============================================================================================

/// The output of a program execution, containing the state of the stack, advice provider,
/// memory, and final precompile transcript at the end of execution.
#[derive(Debug)]
pub struct ExecutionOutput {
    pub stack: StackOutputs,
    pub advice: AdviceProvider,
    pub memory: Memory,
    pub final_pc_transcript: PrecompileTranscript,
}

// FAST PROCESS STATE
// ===============================================================================================

#[derive(Debug)]
pub struct FastProcessState<'a> {
    pub(super) processor: &'a mut FastProcessor,
}

impl FastProcessor {
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Fast(FastProcessState { processor: self })
    }
}

// EXECUTION CONTEXT INFO
// ===============================================================================================

/// Information about the execution context.
///
/// This struct is used to keep track of the information needed to return to the previous context
/// upon return from a `call`, `syscall` or `dyncall`.
#[derive(Debug)]
struct ExecutionContextInfo {
    /// This stores all the elements on the stack at the call site, excluding the top 16 elements.
    /// This corresponds to the overflow table in [crate::Process].
    overflow_stack: Vec<Felt>,
    ctx: ContextId,
    fn_hash: Word,
}