miden_processor/fast/mod.rs

#[cfg(test)]
use alloc::rc::Rc;
use alloc::{boxed::Box, sync::Arc, vec::Vec};
#[cfg(test)]
use core::cell::Cell;
use core::cmp::min;

use memory::Memory;
use miden_air::{Felt, RowIndex};
use miden_core::{
    Decorator, EMPTY_WORD, Program, StackOutputs, WORD_SIZE, Word, ZERO,
    mast::{MastForest, MastNode, MastNodeExt, MastNodeId},
    precompile::PrecompileTranscript,
    stack::MIN_STACK_DEPTH,
    utils::range,
};

use crate::{
    AdviceInputs, AdviceProvider, AsyncHost, ContextId, ErrorContext, ExecutionError, ProcessState,
    chiplets::Ace,
    continuation_stack::{Continuation, ContinuationStack},
    fast::execution_tracer::{ExecutionTracer, TraceGenerationContext},
};

pub mod execution_tracer;
mod memory;
mod operation;
pub use operation::eval_circuit_fast_;
pub mod trace_state;
mod tracer;
pub use tracer::{NoopTracer, Tracer};

mod basic_block;
mod call_and_dyn;
mod external;
mod join;
mod r#loop;
mod split;

#[cfg(test)]
mod tests;

/// The size of the stack buffer.
///
/// Note: This value is much larger than it needs to be for the majority of programs. However, some
/// existing programs need it, so we're forced to push it up (though this should be double-checked).
/// At this high a value, we're starting to see some performance degradation on benchmarks. For
/// example, the blake3 benchmark went from 285 MHz to 250 MHz (~12% degradation). Perhaps a better
/// solution would be to make this value much smaller (~1000), and then fall back to a `Vec` if the
/// stack overflows.
const STACK_BUFFER_SIZE: usize = 6850;

/// The initial position of the top of the stack in the stack buffer.
///
/// We place this value close to 0 because if a program hits the limit, it's much more likely to hit
/// the upper bound than the lower bound, since hitting the lower bound only occurs when you drop
/// 0's that were generated automatically to keep the stack depth at 16. In practice, if this
/// occurs, it is most likely a bug.
const INITIAL_STACK_TOP_IDX: usize = 250;
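
// Illustrative note on the two constants above: at startup the occupied region of the buffer is
// `stack[INITIAL_STACK_TOP_IDX - MIN_STACK_DEPTH..INITIAL_STACK_TOP_IDX]`, i.e. `stack[234..250]`.
// This leaves 234 slots of headroom below the stack bottom and `STACK_BUFFER_SIZE - 250 = 6600`
// slots above the stack top before a buffer boundary is reached.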

/// A fast processor which doesn't generate any trace.
///
/// This processor is designed to be as fast as possible. Hence, it only keeps track of the current
/// state of the processor (i.e. the stack, current clock cycle, current memory context, and free
/// memory pointer).
///
/// # Stack Management
/// A few key points about how the stack was designed for maximum performance:
///
/// - The stack has a fixed buffer size defined by `STACK_BUFFER_SIZE`.
///     - This was observed to increase performance by at least 2x compared to using a `Vec` with
///       `push()` & `pop()`.
///     - We track the stack top and bottom using indices `stack_top_idx` and `stack_bot_idx`,
///       respectively.
/// - Since we are using a fixed-size buffer, we need to ensure that stack buffer accesses are not
///   out of bounds. Naively, we could check for this on every access. However, every operation
///   alters the stack depth by a predetermined amount, allowing us to precisely determine the
///   minimum number of operations required to reach a stack buffer boundary, whether at the top or
///   bottom.
///     - For example, if the stack top is 10 elements away from the top boundary, and the stack
///       bottom is 15 elements away from the bottom boundary, then we can safely execute 10
///       operations that modify the stack depth with no bounds check.
/// - When switching contexts (e.g., during a call or syscall), all elements past the first 16 are
///   stored in an `ExecutionContextInfo` struct, and the stack is truncated to 16 elements. This
///   will be restored when returning from the call or syscall.
///
/// # Clock Cycle Management
/// - The clock cycle (`clk`) is managed in the same way as in `Process`. That is, it is incremented
///   by 1 for every row that `Process` adds to the main trace.
///     - It is important to do so because the clock cycle is used to determine the context ID for
///       new execution contexts when using `call` or `dyncall`.
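///
/// For illustration, the bounds-check batching described above amounts to computing (a sketch,
/// not the exact internal API):
///
/// ```ignore
/// // Number of depth-changing operations guaranteed safe before the next bounds check.
/// let safe_ops = core::cmp::min(
///     STACK_BUFFER_SIZE - stack_top_idx, // headroom above the stack top
///     stack_bot_idx,                     // headroom below the stack bottom
/// );
/// ```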
#[derive(Debug)]
pub struct FastProcessor {
    /// The stack is stored in reverse order, so that the last element is at the top of the stack.
    pub(super) stack: Box<[Felt; STACK_BUFFER_SIZE]>,
    /// The index one past the top element of the stack (i.e. the exclusive upper bound of the
    /// occupied region of the stack buffer).
    stack_top_idx: usize,
    /// The index of the bottom element of the stack.
    stack_bot_idx: usize,

    /// The current clock cycle.
    pub(super) clk: RowIndex,

    /// The current context ID.
    pub(super) ctx: ContextId,

    /// The hash of the function that called into the current context, or `[ZERO, ZERO, ZERO,
    /// ZERO]` if we are in the first context (i.e. when `call_stack` is empty).
    pub(super) caller_hash: Word,

    /// The advice provider to be used during execution.
    pub(super) advice: AdviceProvider,

    /// A map from (context_id, word_address) to the word stored starting at that memory location.
    pub(super) memory: Memory,

    /// A map storing metadata per call to the ACE chiplet.
    pub(super) ace: Ace,

    /// The call stack is used when starting a new execution context (from a `call`, `syscall` or
    /// `dyncall`) to keep track of the information needed to return to the previous context upon
    /// return. It is a stack since calls can be nested.
    call_stack: Vec<ExecutionContextInfo>,

    /// Whether to enable debug statements and tracing.
    in_debug_mode: bool,

    /// Transcript used to record commitments via the `log_precompile` instruction (implemented via
    /// an RPO sponge).
    pc_transcript: PrecompileTranscript,

    /// Tracks decorator retrieval calls for testing.
    #[cfg(test)]
    pub decorator_retrieval_count: Rc<Cell<usize>>,
}

impl FastProcessor {
    // CONSTRUCTORS
    // ----------------------------------------------------------------------------------------------

    /// Creates a new `FastProcessor` instance with the given stack inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
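    ///
    /// # Example
    ///
    /// A minimal usage sketch; note that the last input ends up on top of the stack:
    ///
    /// ```ignore
    /// let processor = FastProcessor::new(&[Felt::new(1), Felt::new(2), Felt::new(3)]);
    /// assert_eq!(processor.stack_get(0), Felt::new(3)); // `3` is on top
    /// ```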
    pub fn new(stack_inputs: &[Felt]) -> Self {
        Self::initialize(stack_inputs, AdviceInputs::default(), false)
    }

    /// Creates a new `FastProcessor` instance with the given stack and advice inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new_with_advice_inputs(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, false)
    }

    /// Creates a new `FastProcessor` instance, set to debug mode, with the given stack
    /// and advice inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new_debug(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, true)
    }

    /// Generic constructor unifying the above public ones.
    ///
    /// The stack inputs are expected to be provided in reverse operand order, i.e. with the bottom
    /// of the stack first. For example, if `stack_inputs = [1,2,3]`, then the stack is initialized
    /// as `[3,2,1,0,0,...]`, with `3` on top.
    fn initialize(stack_inputs: &[Felt], advice_inputs: AdviceInputs, in_debug_mode: bool) -> Self {
        assert!(stack_inputs.len() <= MIN_STACK_DEPTH);

        let stack_top_idx = INITIAL_STACK_TOP_IDX;
        let stack = {
            // Note: we use `Vec::into_boxed_slice()` here, since `Box::new([T; N])` first allocates
            // the array on the stack, and then moves it to the heap. This might cause a
            // stack overflow on some systems.
            let mut stack: Box<[Felt; STACK_BUFFER_SIZE]> =
                vec![ZERO; STACK_BUFFER_SIZE].into_boxed_slice().try_into().unwrap();
            let bottom_idx = stack_top_idx - stack_inputs.len();

            stack[bottom_idx..stack_top_idx].copy_from_slice(stack_inputs);
            stack
        };

        Self {
            advice: advice_inputs.into(),
            stack,
            stack_top_idx,
            stack_bot_idx: stack_top_idx - MIN_STACK_DEPTH,
            clk: 0_u32.into(),
            ctx: 0_u32.into(),
            caller_hash: EMPTY_WORD,
            memory: Memory::new(),
            call_stack: Vec::new(),
            ace: Ace::default(),
            in_debug_mode,
            pc_transcript: PrecompileTranscript::new(),
            #[cfg(test)]
            decorator_retrieval_count: Rc::new(Cell::new(0)),
        }
    }

    // ACCESSORS
    // -------------------------------------------------------------------------------------------

    #[cfg(test)]
    #[inline(always)]
    fn record_decorator_retrieval(&self) {
        self.decorator_retrieval_count.set(self.decorator_retrieval_count.get() + 1);
    }

    /// Returns the size of the stack.
    #[inline(always)]
    fn stack_size(&self) -> usize {
        self.stack_top_idx - self.stack_bot_idx
    }

    /// Returns the stack, such that the top of the stack is at the last index of the returned
    /// slice.
    pub fn stack(&self) -> &[Felt] {
        &self.stack[self.stack_bot_idx..self.stack_top_idx]
    }

    /// Returns the top 16 elements of the stack.
    pub fn stack_top(&self) -> &[Felt] {
        &self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns a mutable reference to the top 16 elements of the stack.
    pub fn stack_top_mut(&mut self) -> &mut [Felt] {
        &mut self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns the element on the stack at index `idx`.
    #[inline(always)]
    pub fn stack_get(&self, idx: usize) -> Felt {
        self.stack[self.stack_top_idx - idx - 1]
    }

    /// Mutable variant of `stack_get()`.
    #[inline(always)]
    pub fn stack_get_mut(&mut self, idx: usize) -> &mut Felt {
        &mut self.stack[self.stack_top_idx - idx - 1]
    }

    /// Returns the word on the stack starting at index `start_idx` in "stack order".
    ///
    /// That is, for `start_idx=0` the top element of the stack will be at the last position in the
    /// word.
    ///
    /// For example, if the stack looks like this:
    ///
    /// top                                                       bottom
    /// v                                                           v
    /// a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p
    ///
    /// Then
    /// - `stack_get_word(0)` returns `[d, c, b, a]`,
    /// - `stack_get_word(1)` returns `[e, d, c, b]`,
    /// - etc.
    #[inline(always)]
    pub fn stack_get_word(&self, start_idx: usize) -> Word {
        // Ensure we have enough elements to form a complete word
        debug_assert!(
            start_idx + WORD_SIZE <= self.stack_depth() as usize,
            "Not enough elements on stack to read word starting at index {start_idx}"
        );

        let word_start_idx = self.stack_top_idx - start_idx - 4;
        let result: [Felt; WORD_SIZE] =
            self.stack[range(word_start_idx, WORD_SIZE)].try_into().unwrap();
        result.into()
    }

    /// Returns the number of elements on the stack in the current context.
    #[inline(always)]
    pub fn stack_depth(&self) -> u32 {
        (self.stack_top_idx - self.stack_bot_idx) as u32
    }

    // MUTATORS
    // -------------------------------------------------------------------------------------------

    /// Writes an element to the stack at the given index.
    #[inline(always)]
    pub fn stack_write(&mut self, idx: usize, element: Felt) {
        self.stack[self.stack_top_idx - idx - 1] = element
    }

    /// Writes a word to the stack starting at the given index.
    ///
    /// `start_idx` is the top-most of the four stack positions the word occupies. The word is
    /// written using the same "stack order" convention as `stack_get_word()`, so its last element
    /// ends up at stack index `start_idx`.
    #[inline(always)]
    pub fn stack_write_word(&mut self, start_idx: usize, word: &Word) {
        debug_assert!(start_idx < MIN_STACK_DEPTH);

        let word_start_idx = self.stack_top_idx - start_idx - 4;
        let source: [Felt; WORD_SIZE] = (*word).into();
        self.stack[range(word_start_idx, WORD_SIZE)].copy_from_slice(&source)
    }
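
    // Illustrative sketch (hypothetical values): `stack_write_word` and `stack_get_word` use the
    // same "stack order" convention, so writing a word and reading it back from the same index is
    // a round trip:
    //
    //     let w = Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]);
    //     processor.stack_write_word(0, &w);
    //     assert_eq!(processor.stack_get_word(0), w);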

    /// Swaps the elements at the given indices on the stack.
    #[inline(always)]
    pub fn stack_swap(&mut self, idx1: usize, idx2: usize) {
        let a = self.stack_get(idx1);
        let b = self.stack_get(idx2);
        self.stack_write(idx1, b);
        self.stack_write(idx2, a);
    }

    // EXECUTE
    // -------------------------------------------------------------------------------------------

    /// Executes the given program and returns the resulting [ExecutionOutput], which contains the
    /// stack outputs, the advice provider, the memory state, and the final precompile transcript.
    pub async fn execute(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<ExecutionOutput, ExecutionError> {
        self.execute_with_tracer(program, host, &mut NoopTracer).await
    }
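
    // Usage sketch (hedged): `program` and `host` are assumed to be a compiled `Program` and an
    // `AsyncHost` implementation provided by the caller.
    //
    //     let processor = FastProcessor::new(&stack_inputs);
    //     let output = processor.execute(&program, &mut host).await?;
    //     let stack_outputs = output.stack;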

    /// Executes the given program and returns the [ExecutionOutput] along with the context
    /// necessary to build the execution trace.
    pub async fn execute_for_trace(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
        fragment_size: usize,
    ) -> Result<(ExecutionOutput, TraceGenerationContext), ExecutionError> {
        let mut tracer = ExecutionTracer::new(fragment_size);
        let execution_output = self.execute_with_tracer(program, host, &mut tracer).await?;

        // Pass the final precompile transcript from the execution output to the trace generation
        // context.
        let context = tracer.into_trace_generation_context(execution_output.final_pc_transcript);

        Ok((execution_output, context))
    }
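
    // Usage sketch (hedged): the fragment size below is an arbitrary illustrative value chosen by
    // the caller, not a recommendation from this module.
    //
    //     let (output, trace_ctx) =
    //         processor.execute_for_trace(&program, &mut host, fragment_size).await?;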

    /// Executes the given program with the provided tracer and returns the [ExecutionOutput].
    pub async fn execute_with_tracer(
        mut self,
        program: &Program,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<ExecutionOutput, ExecutionError> {
        let stack_outputs = self.execute_impl(program, host, tracer).await?;

        Ok(ExecutionOutput {
            stack: stack_outputs,
            advice: self.advice,
            memory: self.memory,
            final_pc_transcript: self.pc_transcript,
        })
    }

    /// Executes the given program with the provided tracer and returns the stack outputs.
    ///
    /// This function takes a `&mut self` (compared to `self` for the public execute functions) so
    /// that the processor state may be accessed after execution. It is incorrect to execute a
    /// second program using the same processor. This is mainly meant to be used in tests.
    async fn execute_impl(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<StackOutputs, ExecutionError> {
        let mut continuation_stack = ContinuationStack::new(program);
        let mut current_forest = program.mast_forest().clone();

        // Merge the program's advice map into the advice provider
        self.advice
            .extend_map(current_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, &()))?;

        while let Some(continuation) = continuation_stack.pop_continuation() {
            match continuation {
                Continuation::StartNode(node_id) => {
                    let node = current_forest.get_node_by_id(node_id).unwrap();

                    match node {
                        MastNode::Block(basic_block_node) => {
                            self.execute_basic_block_node(
                                basic_block_node,
                                node_id,
                                &current_forest,
                                host,
                                &mut continuation_stack,
                                &current_forest,
                                tracer,
                            )
                            .await?
                        },
                        MastNode::Join(join_node) => self.start_join_node(
                            join_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Split(split_node) => self.start_split_node(
                            split_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Loop(loop_node) => self.start_loop_node(
                            loop_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Call(call_node) => self.start_call_node(
                            call_node,
                            node_id,
                            program,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Dyn(_) => {
                            self.start_dyn_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                                tracer,
                            )
                            .await?
                        },
                        MastNode::External(_external_node) => {
                            self.execute_external_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                                tracer,
                            )
                            .await?
                        },
                    }
                },
                Continuation::FinishJoin(node_id) => self.finish_join_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishSplit(node_id) => self.finish_split_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishLoop(node_id) => self.finish_loop_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishCall(node_id) => self.finish_call_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishDyn(node_id) => self.finish_dyn_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishExternal(node_id) => {
                    // Execute after_exit decorators when returning from an external node
                    // Note: current_forest should already be restored by EnterForest continuation
                    self.execute_after_exit_decorators(node_id, &current_forest, host)?;
                },
                Continuation::EnterForest(previous_forest) => {
                    // Restore the previous forest
                    current_forest = previous_forest;
                },
            }
        }

        StackOutputs::new(
            self.stack[self.stack_bot_idx..self.stack_top_idx]
                .iter()
                .rev()
                .copied()
                .collect(),
        )
        .map_err(|_| {
            ExecutionError::OutputStackOverflow(
                self.stack_top_idx - self.stack_bot_idx - MIN_STACK_DEPTH,
            )
        })
    }
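
    // Note on the control flow above (a sketch; the exact continuations pushed by the
    // `start_*`/`finish_*` helpers live in the sibling modules such as `join.rs` and are assumed
    // here): instead of recursing into the MAST, execution is driven iteratively. Starting a
    // control node typically pushes its "finish" continuation followed by the continuations for
    // its children, e.g. conceptually for a join node:
    //
    //     continuation_stack.push_finish_join(node_id);
    //     continuation_stack.push_start_node(second_child_id);
    //     continuation_stack.push_start_node(first_child_id);
    //
    // so that the children execute first (LIFO order) and `FinishJoin` runs once both complete.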

    // DECORATOR EXECUTORS
    // --------------------------------------------------------------------------------------------

    /// Executes the decorators that should be executed before entering a node.
    fn execute_before_enter_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        if !self.in_debug_mode {
            return Ok(());
        }

        #[cfg(test)]
        self.record_decorator_retrieval();

        let node = current_forest.get_node_by_id(node_id).unwrap_or_else(|| {
            panic!("internal error: node id {node_id} not found in current forest")
        });

        for &decorator_id in node.before_enter(current_forest) {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the decorators that should be executed after exiting a node.
    fn execute_after_exit_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        if !self.in_debug_mode {
            return Ok(());
        }

        #[cfg(test)]
        self.record_decorator_retrieval();

        let node = current_forest.get_node_by_id(node_id).unwrap_or_else(|| {
            panic!("internal error: node id {node_id} not found in current forest")
        });

        for &decorator_id in node.after_exit(current_forest) {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the specified decorator.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.in_debug_mode {
                    let clk = self.clk;
                    let process = &mut self.state();
                    host.on_debug(process, options)
                        .map_err(|err| ExecutionError::DebugHandlerError { clk, err })?;
                }
            },
            Decorator::AsmOp(_assembly_op) => {
                // do nothing
            },
            Decorator::Trace(id) => {
                let clk = self.clk;
                let process = &mut self.state();
                host.on_trace(process, *id).map_err(|err| ExecutionError::TraceHandlerError {
                    clk,
                    trace_id: *id,
                    err,
                })?;
            },
        };
        Ok(())
    }

    // HELPERS
    // ----------------------------------------------------------------------------------------------

    /// Increments the clock by 1.
    #[inline(always)]
    fn increment_clk(&mut self, tracer: &mut impl Tracer) {
        self.clk += 1_u32;

        tracer.increment_clk();
    }

    async fn load_mast_forest<E>(
        &mut self,
        node_digest: Word,
        host: &mut impl AsyncHost,
        get_mast_forest_failed: impl Fn(Word, &E) -> ExecutionError,
        err_ctx: &E,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError>
    where
        E: ErrorContext,
    {
        let mast_forest = host
            .get_mast_forest(&node_digest)
            .await
            .ok_or_else(|| get_mast_forest_failed(node_digest, err_ctx))?;

        // We limit the parts of the program that can be called externally to procedure
        // roots, even though MAST doesn't have that restriction.
        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, err_ctx))?;

        // Merge the advice map of this forest into the advice provider.
        // Note that the map may be merged multiple times if a different procedure from the same
        // forest is called.
        // For now, only compiled libraries contain non-empty advice maps, so for most cases,
        // this call will be cheap.
        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, err_ctx))?;

        Ok((root_id, mast_forest))
    }

    /// Increments the stack top pointer by 1.
    ///
    /// The bottom of the stack is never affected by this operation.
    #[inline(always)]
    fn increment_stack_size(&mut self, tracer: &mut impl Tracer) {
        tracer.increment_stack_size(self);

        self.stack_top_idx += 1;
    }

    /// Decrements the stack top pointer by 1.
    ///
    /// The bottom of the stack is only decremented in cases where the stack depth would otherwise
    /// become less than 16.
    #[inline(always)]
    fn decrement_stack_size(&mut self, tracer: &mut impl Tracer) {
        if self.stack_top_idx == MIN_STACK_DEPTH {
            // We no longer have any room in the stack buffer to decrement the stack size (which
            // would cause the `stack_bot_idx` to go below 0). We therefore reset the stack to its
            // original position.
            self.reset_stack_in_buffer(INITIAL_STACK_TOP_IDX);
        }

        self.stack_top_idx -= 1;
        self.stack_bot_idx = min(self.stack_bot_idx, self.stack_top_idx - MIN_STACK_DEPTH);

        tracer.decrement_stack_size();
    }

    /// Resets the stack in the buffer to a new position, preserving the top 16 elements of the
    /// stack.
    ///
    /// # Preconditions
    /// - The stack is expected to have exactly 16 elements.
    #[inline(always)]
    fn reset_stack_in_buffer(&mut self, new_stack_top_idx: usize) {
        debug_assert_eq!(self.stack_depth(), MIN_STACK_DEPTH as u32);

        let new_stack_bot_idx = new_stack_top_idx - MIN_STACK_DEPTH;

        // Copy the stack to its new position.
        self.stack
            .copy_within(self.stack_bot_idx..self.stack_top_idx, new_stack_bot_idx);

        // Zero out the buffer below `new_stack_bot_idx`, since this is the region values are
        // pulled from when the stack shrinks, and those values are guaranteed to be ZERO. We don't
        // need to zero out above `stack_top_idx`, since values there are never read before being
        // written.
        self.stack[0..new_stack_bot_idx].fill(ZERO);

        // Update indices.
        self.stack_bot_idx = new_stack_bot_idx;
        self.stack_top_idx = new_stack_top_idx;
    }
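
    // Worked example (derived from the constants and code above): if repeated drops bring
    // `stack_top_idx` down to 16, the 16 remaining elements occupy `stack[0..16]`. The call to
    // `reset_stack_in_buffer(INITIAL_STACK_TOP_IDX)` then copies them to `stack[234..250]`, zeroes
    // `stack[0..234]`, and sets `stack_bot_idx = 234` and `stack_top_idx = 250`, restoring the
    // original headroom before the pending decrement is applied.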

    // TESTING
    // ----------------------------------------------------------------------------------------------

    /// Convenience synchronous wrapper around [Self::execute] for testing purposes.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        let execution_output = rt.block_on(self.execute(program, host))?;

        Ok(execution_output.stack)
    }
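
    // Usage sketch (hedged; `program` and `host` are assumed to exist): in tests the async entry
    // points can be driven synchronously, e.g.
    //
    //     let stack = FastProcessor::new(&inputs).execute_sync(&program, &mut host)?;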

    /// Convenience synchronous wrapper around [Self::execute_for_trace] for testing purposes.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_for_trace_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
        fragment_size: usize,
    ) -> Result<(ExecutionOutput, TraceGenerationContext), ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_for_trace(program, host, fragment_size))
    }

    /// Similar to [Self::execute_sync], but takes `&mut self`, which allows the processor state to
    /// be inspected after execution.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync_mut(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_impl(program, host, &mut NoopTracer))
    }
}

// EXECUTION OUTPUT
// ===============================================================================================

/// The output of a program execution, containing the state of the stack, advice provider,
/// memory, and final precompile transcript at the end of execution.
#[derive(Debug)]
pub struct ExecutionOutput {
    pub stack: StackOutputs,
    pub advice: AdviceProvider,
    pub memory: Memory,
    pub final_pc_transcript: PrecompileTranscript,
}
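
// Usage sketch (hedged): the fields are public, so callers can destructure the output directly:
//
//     let ExecutionOutput { stack, advice, memory, final_pc_transcript } = execution_output;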

// FAST PROCESS STATE
// ===============================================================================================

#[derive(Debug)]
pub struct FastProcessState<'a> {
    pub(super) processor: &'a mut FastProcessor,
}

impl FastProcessor {
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Fast(FastProcessState { processor: self })
    }
}

// EXECUTION CONTEXT INFO
// ===============================================================================================

/// Information about the execution context.
///
/// This struct is used to keep track of the information needed to return to the previous context
/// upon return from a `call`, `syscall` or `dyncall`.
#[derive(Debug)]
struct ExecutionContextInfo {
    /// This stores all the elements on the stack at the call site, excluding the top 16 elements.
    /// This corresponds to the overflow table in [crate::Process].
    overflow_stack: Vec<Felt>,
    ctx: ContextId,
    fn_hash: Word,
}