miden_processor/fast/mod.rs
use alloc::{sync::Arc, vec::Vec};
use core::cmp::min;

use memory::Memory;
use miden_air::{Felt, RowIndex};
use miden_core::{
    Decorator, EMPTY_WORD, Program, StackOutputs, WORD_SIZE, Word, ZERO,
    mast::{MastForest, MastNode, MastNodeExt, MastNodeId},
    stack::MIN_STACK_DEPTH,
    utils::range,
};

use crate::{
    AdviceInputs, AdviceProvider, AsyncHost, ContextId, ErrorContext, ExecutionError, FMP_MIN,
    ProcessState,
    chiplets::Ace,
    continuation_stack::{Continuation, ContinuationStack},
    fast::{execution_tracer::ExecutionTracer, trace_state::TraceFragmentContext},
};

mod execution_tracer;
mod memory;
mod operation;
pub mod trace_state;
mod tracer;
pub use tracer::{NoopTracer, Tracer};

mod basic_block;
mod call_and_dyn;
mod external;
mod join;
mod r#loop;
mod split;

#[cfg(test)]
mod tests;

/// The size of the stack buffer.
///
/// Note: This value is much larger than it needs to be for the majority of programs. However, some
/// existing programs need it (e.g. `std::math::secp256k1::group::gen_mul`), so we're forced to push
/// it up. At this high a value, we're starting to see some performance degradation on benchmarks.
/// For example, the blake3 benchmark went from 285 MHz to 250 MHz (~12% degradation). Perhaps a
/// better solution would be to make this value much smaller (~1000), and then fall back to a `Vec`
/// if the stack overflows.
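///
/// A sketch of that suggested alternative (hypothetical; not what this module implements):
///
/// ```ignore
/// // Hypothetical hybrid storage: a small inline buffer that spills to the heap on overflow.
/// enum StackStorage {
///     Inline([Felt; 1024]),
///     Spilled(Vec<Felt>),
/// }
/// ```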
const STACK_BUFFER_SIZE: usize = 6850;

/// The initial position of the top of the stack in the stack buffer.
///
/// We place this value close to 0 because if a program hits the limit, it's much more likely to hit
/// the upper bound than the lower bound, since hitting the lower bound only occurs when you drop
/// 0's that were generated automatically to keep the stack depth at 16. In practice, if this
/// occurs, it is most likely a bug.
const INITIAL_STACK_TOP_IDX: usize = 250;

/// The number of rows per core trace fragment.
pub const NUM_ROWS_PER_CORE_FRAGMENT: usize = 1024;

/// A fast processor which doesn't generate any trace.
///
/// This processor is designed to be as fast as possible. Hence, it only keeps track of the current
/// state of the processor (i.e. the stack, current clock cycle, current memory context, and free
/// memory pointer).
///
/// # Stack Management
/// A few key points about how the stack was designed for maximum performance:
///
/// - The stack has a fixed buffer size defined by `STACK_BUFFER_SIZE`.
///     - This was observed to increase performance by at least 2x compared to using a `Vec` with
///       `push()` & `pop()`.
/// - We track the stack top and bottom using indices `stack_top_idx` and `stack_bot_idx`,
///   respectively.
/// - Since we are using a fixed-size buffer, we need to ensure that stack buffer accesses are not
///   out of bounds. Naively, we could check for this on every access. However, every operation
///   alters the stack depth by a predetermined amount, allowing us to precisely determine the
///   minimum number of operations required to reach a stack buffer boundary, whether at the top or
///   bottom (see the sketch below).
///     - For example, if the stack top is 10 elements away from the top boundary, and the stack
///       bottom is 15 elements away from the bottom boundary, then we can safely execute 10
///       operations that modify the stack depth with no bounds check.
/// - When switching contexts (e.g., during a call or syscall), all elements past the first 16 are
///   stored in an `ExecutionContextInfo` struct, and the stack is truncated to 16 elements. This
///   will be restored when returning from the call or syscall.
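///
/// A sketch of the bounds-check arithmetic (illustrative only; simplified from the actual
/// per-operation accounting):
///
/// ```ignore
/// // Number of depth-changing operations that are safe to execute without any bounds check.
/// let headroom = STACK_BUFFER_SIZE - stack_top_idx; // distance to the top boundary
/// let footroom = stack_bot_idx;                     // distance to the bottom boundary
/// let safe_ops = core::cmp::min(headroom, footroom);
/// ```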
///
/// # Clock Cycle Management
/// - The clock cycle (`clk`) is managed in the same way as in `Process`. That is, it is incremented
///   by 1 for every row that `Process` adds to the main trace.
///     - It is important to do so because the clock cycle is used to determine the context ID for
///       new execution contexts when using `call` or `dyncall`.
#[derive(Debug)]
pub struct FastProcessor {
    /// The stack is stored in reverse order, so that the last element is at the top of the stack.
    pub(super) stack: [Felt; STACK_BUFFER_SIZE],
    /// The index of the top of the stack.
    stack_top_idx: usize,
    /// The index of the bottom of the stack.
    stack_bot_idx: usize,

    /// The current clock cycle.
    pub(super) clk: RowIndex,

    /// The current context ID.
    pub(super) ctx: ContextId,

    /// The free memory pointer.
    pub(super) fmp: Felt,

    /// Whether we are currently in a syscall.
    in_syscall: bool,

    /// The hash of the function that called into the current context, or `[ZERO, ZERO, ZERO,
    /// ZERO]` if we are in the first context (i.e. when `call_stack` is empty).
    pub(super) caller_hash: Word,

    /// The advice provider to be used during execution.
    pub(super) advice: AdviceProvider,

    /// A map from (context_id, word_address) to the word stored starting at that memory location.
    pub(super) memory: Memory,

    /// A map storing metadata per call to the ACE chiplet.
    pub(super) ace: Ace,

    /// The call stack is used when starting a new execution context (from a `call`, `syscall` or
    /// `dyncall`) to keep track of the information needed to return to the previous context upon
    /// return. It is a stack since calls can be nested.
    call_stack: Vec<ExecutionContextInfo>,

    /// Whether to enable debug statements and tracing.
    in_debug_mode: bool,
}

impl FastProcessor {
    // CONSTRUCTORS
    // -------------------------------------------------------------------------------------------

    /// Creates a new `FastProcessor` instance with the given stack inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
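    ///
    /// # Example
    /// A minimal usage sketch (values are illustrative):
    /// ```ignore
    /// let processor = FastProcessor::new(&[Felt::new(1), Felt::new(2), Felt::new(3)]);
    /// assert_eq!(processor.stack_get(0), Felt::new(3)); // `3` ends up on top of the stack
    /// ```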
    pub fn new(stack_inputs: &[Felt]) -> Self {
        Self::initialize(stack_inputs, AdviceInputs::default(), false)
    }

    /// Creates a new `FastProcessor` instance with the given stack and advice inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new_with_advice_inputs(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, false)
    }

    /// Creates a new `FastProcessor` instance, set to debug mode, with the given stack
    /// and advice inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new_debug(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, true)
    }

    /// Generic constructor unifying the above public ones.
    ///
    /// The stack inputs are expected to be stored in reverse order. For example, if `stack_inputs =
    /// [1,2,3]`, then the stack will be initialized as `[3,2,1,0,0,...]`, with `3` being on
    /// top.
    fn initialize(stack_inputs: &[Felt], advice_inputs: AdviceInputs, in_debug_mode: bool) -> Self {
        assert!(stack_inputs.len() <= MIN_STACK_DEPTH);

        let stack_top_idx = INITIAL_STACK_TOP_IDX;
        let stack = {
            let mut stack = [ZERO; STACK_BUFFER_SIZE];
            let bottom_idx = stack_top_idx - stack_inputs.len();

            stack[bottom_idx..stack_top_idx].copy_from_slice(stack_inputs);
            stack
        };

        Self {
            advice: advice_inputs.into(),
            stack,
            stack_top_idx,
            stack_bot_idx: stack_top_idx - MIN_STACK_DEPTH,
            clk: 0_u32.into(),
            ctx: 0_u32.into(),
            fmp: Felt::new(FMP_MIN),
            in_syscall: false,
            caller_hash: EMPTY_WORD,
            memory: Memory::new(),
            call_stack: Vec::new(),
            ace: Ace::default(),
            in_debug_mode,
        }
    }

    // ACCESSORS
    // -------------------------------------------------------------------------------------------

    /// Returns the size of the stack.
    #[inline(always)]
    fn stack_size(&self) -> usize {
        self.stack_top_idx - self.stack_bot_idx
    }

    /// Returns the stack, such that the top of the stack is at the last index of the returned
    /// slice.
    pub fn stack(&self) -> &[Felt] {
        &self.stack[self.stack_bot_idx..self.stack_top_idx]
    }

    /// Returns the top 16 elements of the stack.
    pub fn stack_top(&self) -> &[Felt] {
        &self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns a mutable reference to the top 16 elements of the stack.
    pub fn stack_top_mut(&mut self) -> &mut [Felt] {
        &mut self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns the element on the stack at index `idx`.
    #[inline(always)]
    pub fn stack_get(&self, idx: usize) -> Felt {
        self.stack[self.stack_top_idx - idx - 1]
    }

    /// Mutable variant of `stack_get()`.
    #[inline(always)]
    pub fn stack_get_mut(&mut self, idx: usize) -> &mut Felt {
        &mut self.stack[self.stack_top_idx - idx - 1]
    }

    /// Returns the word on the stack starting at index `start_idx` in "stack order".
    ///
    /// That is, for `start_idx=0` the top element of the stack will be at the last position in the
    /// word.
    ///
    /// For example, if the stack looks like this:
    ///
    /// top                                                          bottom
    ///  v                                                              v
    ///  a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p
    ///
    /// Then
    /// - `stack_get_word(0)` returns `[d, c, b, a]`,
    /// - `stack_get_word(1)` returns `[e, d, c, b]`,
    /// - etc.
    #[inline(always)]
    pub fn stack_get_word(&self, start_idx: usize) -> Word {
        // Ensure we have enough elements to form a complete word
        debug_assert!(
            start_idx + WORD_SIZE <= self.stack_depth() as usize,
            "Not enough elements on stack to read word starting at index {start_idx}"
        );

        let word_start_idx = self.stack_top_idx - start_idx - WORD_SIZE;
        let result: [Felt; WORD_SIZE] =
            self.stack[range(word_start_idx, WORD_SIZE)].try_into().unwrap();
        result.into()
    }

    /// Returns the number of elements on the stack in the current context.
    #[inline(always)]
    pub fn stack_depth(&self) -> u32 {
        (self.stack_top_idx - self.stack_bot_idx) as u32
    }

    // MUTATORS
    // -------------------------------------------------------------------------------------------

    /// Writes an element to the stack at the given index.
    #[inline(always)]
    pub fn stack_write(&mut self, idx: usize, element: Felt) {
        self.stack[self.stack_top_idx - idx - 1] = element
    }

    /// Writes a word to the stack starting at the given index.
    ///
    /// The index is the index of the first element of the word, and the word is written in reverse
    /// order.
    #[inline(always)]
    pub fn stack_write_word(&mut self, start_idx: usize, word: &Word) {
        debug_assert!(start_idx < MIN_STACK_DEPTH);

        let word_start_idx = self.stack_top_idx - start_idx - WORD_SIZE;
        let source: [Felt; WORD_SIZE] = (*word).into();
        self.stack[range(word_start_idx, WORD_SIZE)].copy_from_slice(&source)
    }

    /// Swaps the elements at the given indices on the stack.
    #[inline(always)]
    pub fn stack_swap(&mut self, idx1: usize, idx2: usize) {
        let a = self.stack_get(idx1);
        let b = self.stack_get(idx2);
        self.stack_write(idx1, b);
        self.stack_write(idx2, a);
    }

    // EXECUTE
    // -------------------------------------------------------------------------------------------

    /// Executes the given program and returns the resulting execution output (stack outputs,
    /// advice provider, and memory).
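    ///
    /// A minimal usage sketch (assumes a `program` and a `host` implementing `AsyncHost` are
    /// available):
    /// ```ignore
    /// let processor = FastProcessor::new(&[Felt::new(1), Felt::new(2)]);
    /// let output = processor.execute(&program, &mut host).await?;
    /// println!("{:?}", output.stack); // the final stack state
    /// ```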
    pub async fn execute(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<ExecutionOutput, ExecutionError> {
        self.execute_with_tracer(program, host, &mut NoopTracer).await
    }

    /// Executes the given program and returns the stack outputs, the advice provider, and
    /// information for building the trace.
    pub async fn execute_for_trace(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<(ExecutionOutput, Vec<TraceFragmentContext>), ExecutionError> {
        let mut tracer = ExecutionTracer::default();
        let execution_output = self.execute_with_tracer(program, host, &mut tracer).await?;

        Ok((execution_output, tracer.into_fragment_contexts()))
    }

    /// Executes the given program with the provided tracer and returns the stack outputs,
    /// advice provider, and memory.
    pub async fn execute_with_tracer(
        mut self,
        program: &Program,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<ExecutionOutput, ExecutionError> {
        let stack_outputs = self.execute_impl(program, host, tracer).await?;

        Ok(ExecutionOutput {
            stack: stack_outputs,
            advice: self.advice,
            memory: self.memory,
        })
    }

    /// Executes the given program with the provided tracer and returns the stack outputs.
    ///
    /// This function takes a `&mut self` (compared to `self` for the public execute functions) so
    /// that the processor state may be accessed after execution. It is incorrect to execute a
    /// second program using the same processor. This is mainly meant to be used in tests.
    async fn execute_impl(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<StackOutputs, ExecutionError> {
        let mut continuation_stack = ContinuationStack::new(program);
        let mut current_forest = program.mast_forest().clone();

        // Merge the program's advice map into the advice provider
        self.advice
            .extend_map(current_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, &()))?;

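        // Drive the MAST traversal iteratively off the continuation stack: a `StartNode`
        // continuation begins executing a node, a `Finish*` continuation completes a node after
        // its children have executed, and `EnterForest` restores a previously active forest.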
        while let Some(continuation) = continuation_stack.pop_continuation() {
            match continuation {
                Continuation::StartNode(node_id) => {
                    let node = current_forest.get_node_by_id(node_id).unwrap();

                    match node {
                        MastNode::Block(basic_block_node) => {
                            self.execute_basic_block_node(
                                basic_block_node,
                                node_id,
                                &current_forest,
                                host,
                                &mut continuation_stack,
                                &current_forest,
                                tracer,
                            )
                            .await?
                        },
                        MastNode::Join(join_node) => self.start_join_node(
                            join_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Split(split_node) => self.start_split_node(
                            split_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Loop(loop_node) => self.start_loop_node(
                            loop_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Call(call_node) => self.start_call_node(
                            call_node,
                            node_id,
                            program,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Dyn(_) => {
                            self.start_dyn_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                                tracer,
                            )
                            .await?
                        },
                        MastNode::External(_external_node) => {
                            self.execute_external_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                                tracer,
                            )
                            .await?
                        },
                    }
                },
                Continuation::FinishJoin(node_id) => self.finish_join_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishSplit(node_id) => self.finish_split_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishLoop(node_id) => self.finish_loop_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishCall(node_id) => self.finish_call_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishDyn(node_id) => self.finish_dyn_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::EnterForest(previous_forest) => {
                    // Restore the previous forest
                    current_forest = previous_forest;
                },
            }
        }

        StackOutputs::new(
            self.stack[self.stack_bot_idx..self.stack_top_idx]
                .iter()
                .rev()
                .copied()
                .collect(),
        )
        .map_err(|_| {
            ExecutionError::OutputStackOverflow(
                self.stack_top_idx - self.stack_bot_idx - MIN_STACK_DEPTH,
            )
        })
    }

    // DECORATOR EXECUTORS
    // -------------------------------------------------------------------------------------------

    /// Executes the decorators that should be executed before entering a node.
    fn execute_before_enter_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let node = current_forest
            .get_node_by_id(node_id)
            .unwrap_or_else(|| {
                panic!("internal error: node id {node_id} not found in current forest")
            });

        for &decorator_id in node.before_enter() {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the decorators that should be executed after exiting a node.
    fn execute_after_exit_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let node = current_forest
            .get_node_by_id(node_id)
            .unwrap_or_else(|| {
                panic!("internal error: node id {node_id} not found in current forest")
            });

        for &decorator_id in node.after_exit() {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the specified decorator.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.in_debug_mode {
                    let process = &mut self.state();
                    host.on_debug(process, options)?;
                }
            },
            Decorator::AsmOp(_assembly_op) => {
                // do nothing
            },
            Decorator::Trace(id) => {
                let process = &mut self.state();
                host.on_trace(process, *id)?;
            },
        };
        Ok(())
    }

    // HELPERS
    // -------------------------------------------------------------------------------------------

    /// Increments the clock by 1.
    #[inline(always)]
    fn increment_clk(&mut self, tracer: &mut impl Tracer) {
        self.clk += 1_u32;

        tracer.increment_clk();
    }

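    /// Fetches from the host the MAST forest that contains a procedure with the given root
    /// digest, merges that forest's advice map into the advice provider, and returns the id of
    /// the procedure root together with the forest.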
    async fn load_mast_forest<E>(
        &mut self,
        node_digest: Word,
        host: &mut impl AsyncHost,
        get_mast_forest_failed: impl Fn(Word, &E) -> ExecutionError,
        err_ctx: &E,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError>
    where
        E: ErrorContext,
    {
        let mast_forest = host
            .get_mast_forest(&node_digest)
            .await
            .ok_or_else(|| get_mast_forest_failed(node_digest, err_ctx))?;

        // We limit the parts of the program that can be called externally to procedure
        // roots, even though MAST doesn't have that restriction.
        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malformed_mast_forest_in_host(node_digest, err_ctx))?;

        // Merge the advice map of this forest into the advice provider.
        // Note that the map may be merged multiple times if a different procedure from the same
        // forest is called.
        // For now, only compiled libraries contain non-empty advice maps, so for most cases,
        // this call will be cheap.
        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, err_ctx))?;

        Ok((root_id, mast_forest))
    }

    /// Increments the stack top pointer by 1.
    ///
    /// The bottom of the stack is never affected by this operation.
    #[inline(always)]
    fn increment_stack_size(&mut self, tracer: &mut impl Tracer) {
        tracer.increment_stack_size(self);

        self.stack_top_idx += 1;
    }

    /// Decrements the stack top pointer by 1.
    ///
    /// The bottom of the stack is only decremented in cases where the stack depth would become less
    /// than 16.
    #[inline(always)]
    fn decrement_stack_size(&mut self, tracer: &mut impl Tracer) {
        if self.stack_top_idx == MIN_STACK_DEPTH {
            // We no longer have any room in the stack buffer to decrement the stack size (which
            // would cause the `stack_bot_idx` to go below 0). We therefore reset the stack to its
            // original position.
            self.reset_stack_in_buffer(INITIAL_STACK_TOP_IDX);
        }

        self.stack_top_idx -= 1;
        self.stack_bot_idx = min(self.stack_bot_idx, self.stack_top_idx - MIN_STACK_DEPTH);

        tracer.decrement_stack_size();
    }

    /// Resets the stack in the buffer to a new position, preserving the top 16 elements of the
    /// stack.
    ///
    /// # Preconditions
    /// - The stack is expected to have exactly 16 elements.
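    ///
    /// For example (illustrative values): with `new_stack_top_idx = 250` and `MIN_STACK_DEPTH =
    /// 16`, the 16 elements are copied to indices `234..250`, indices `0..234` are zeroed, and
    /// `stack_bot_idx`/`stack_top_idx` are updated to `234`/`250`.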
    #[inline(always)]
    fn reset_stack_in_buffer(&mut self, new_stack_top_idx: usize) {
        debug_assert_eq!(self.stack_depth(), MIN_STACK_DEPTH as u32);

        let new_stack_bot_idx = new_stack_top_idx - MIN_STACK_DEPTH;

        // Copy the stack to its new position.
        self.stack
            .copy_within(self.stack_bot_idx..self.stack_top_idx, new_stack_bot_idx);

        // Zero out the stack below `new_stack_bot_idx`, since this is where overflow values are
        // read from, and they are guaranteed to be ZERO. We don't need to zero out anything above
        // `stack_top_idx`, since values there are never read before being written.
        self.stack[0..new_stack_bot_idx].fill(ZERO);

        // Update indices.
        self.stack_bot_idx = new_stack_bot_idx;
        self.stack_top_idx = new_stack_top_idx;
    }

    // TESTING
    // -------------------------------------------------------------------------------------------

    /// Convenience sync wrapper to [Self::execute] for testing purposes.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        let execution_output = rt.block_on(self.execute(program, host))?;

        Ok(execution_output.stack)
    }

    /// Convenience sync wrapper to [Self::execute_for_trace] for testing purposes.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_for_trace_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<(ExecutionOutput, Vec<TraceFragmentContext>), ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_for_trace(program, host))
    }

    /// Similar to [Self::execute_sync], but allows mutable access to the processor.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync_mut(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_impl(program, host, &mut NoopTracer))
    }
}

// EXECUTION OUTPUT
// ===============================================================================================

/// The output of a program execution, containing the state of the stack, advice provider, and
/// memory at the end of the execution.
#[derive(Debug)]
pub struct ExecutionOutput {
    pub stack: StackOutputs,
    pub advice: AdviceProvider,
    pub memory: Memory,
}

// FAST PROCESS STATE
// ===============================================================================================

#[derive(Debug)]
pub struct FastProcessState<'a> {
    pub(super) processor: &'a mut FastProcessor,
}

impl FastProcessor {
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Fast(FastProcessState { processor: self })
    }
}

// EXECUTION CONTEXT INFO
// ===============================================================================================

/// Information about the execution context.
///
/// This struct is used to keep track of the information needed to return to the previous context
/// upon return from a `call`, `syscall` or `dyncall`.
#[derive(Debug)]
struct ExecutionContextInfo {
    /// This stores all the elements on the stack at the call site, excluding the top 16 elements.
    /// This corresponds to the overflow table in [crate::Process].
    overflow_stack: Vec<Felt>,
    /// The ID of the calling context, to be restored on return.
    ctx: ContextId,
    /// The hash of the function active in the calling context, to be restored on return.
    fn_hash: Word,
    /// The free memory pointer of the calling context, to be restored on return.
    fmp: Felt,
737}