miden_processor/fast/mod.rs
use alloc::{boxed::Box, sync::Arc, vec::Vec};
use core::cmp::min;

use memory::Memory;
use miden_air::{Felt, RowIndex};
use miden_core::{
    Decorator, EMPTY_WORD, Program, StackOutputs, WORD_SIZE, Word, ZERO,
    mast::{MastForest, MastNode, MastNodeExt, MastNodeId},
    stack::MIN_STACK_DEPTH,
    utils::range,
};

use crate::{
    AdviceInputs, AdviceProvider, AsyncHost, ContextId, ErrorContext, ExecutionError, FMP_MIN,
    ProcessState,
    chiplets::Ace,
    continuation_stack::{Continuation, ContinuationStack},
    fast::{execution_tracer::ExecutionTracer, trace_state::TraceFragmentContext},
};

mod execution_tracer;
mod memory;
mod operation;
pub mod trace_state;
mod tracer;
pub use tracer::{NoopTracer, Tracer};

mod basic_block;
mod call_and_dyn;
mod external;
mod join;
mod r#loop;
mod split;

#[cfg(test)]
mod tests;

/// The size of the stack buffer.
///
/// Note: This value is much larger than it needs to be for the majority of programs. However, some
/// existing programs need it (e.g. `std::math::secp256k1::group::gen_mul`), so we're forced to push
/// it up. At this high a value, we're starting to see some performance degradation on benchmarks.
/// For example, the blake3 benchmark went from 285 MHz to 250 MHz (~10% degradation). Perhaps a
/// better solution would be to make this value much smaller (~1000), and then fall back to a `Vec`
/// if the stack overflows.
const STACK_BUFFER_SIZE: usize = 6850;

/// The initial position of the top of the stack in the stack buffer.
///
/// We place this value close to 0 because if a program hits the limit, it's much more likely to hit
/// the upper bound than the lower bound, since hitting the lower bound only occurs when you drop
/// 0's that were generated automatically to keep the stack depth at 16. In practice, if this
/// occurs, it is most likely a bug.
const INITIAL_STACK_TOP_IDX: usize = 250;
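
// With these constants, execution starts with `STACK_BUFFER_SIZE - INITIAL_STACK_TOP_IDX` (6600)
// free slots above the stack top and `INITIAL_STACK_TOP_IDX - MIN_STACK_DEPTH` (234) free slots
// below the stack bottom; these are the distances that the bounds-check amortization described on
// [`FastProcessor`] works with.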

/// The number of rows per core trace fragment.
pub const NUM_ROWS_PER_CORE_FRAGMENT: usize = 1024;

/// A fast processor which doesn't generate any trace.
///
/// This processor is designed to be as fast as possible. Hence, it only keeps track of the current
/// state of the processor (i.e. the stack, current clock cycle, current memory context, and free
/// memory pointer).
///
/// # Stack Management
/// A few key points about how the stack was designed for maximum performance:
///
/// - The stack has a fixed buffer size defined by `STACK_BUFFER_SIZE`.
///   - This was observed to increase performance by at least 2x compared to using a `Vec` with
///     `push()` & `pop()`.
/// - We track the stack top and bottom using indices `stack_top_idx` and `stack_bot_idx`,
///   respectively.
/// - Since we are using a fixed-size buffer, we need to ensure that stack buffer accesses are not
///   out of bounds. Naively, we could check for this on every access. However, every operation
///   alters the stack depth by a predetermined amount, allowing us to precisely determine the
///   minimum number of operations required to reach a stack buffer boundary, whether at the top or
///   bottom.
///   - For example, if the stack top is 10 elements away from the top boundary, and the stack
///     bottom is 15 elements away from the bottom boundary, then we can safely execute 10
///     operations that modify the stack depth with no bounds check.
/// - When switching contexts (e.g., during a call or syscall), all elements past the first 16 are
///   stored in an `ExecutionContextInfo` struct, and the stack is truncated to 16 elements. This
///   will be restored when returning from the call or syscall.
///
/// # Clock Cycle Management
/// - The clock cycle (`clk`) is managed in the same way as in `Process`. That is, it is incremented
///   by 1 for every row that `Process` adds to the main trace.
///   - It is important to do so because the clock cycle is used to determine the context ID for
///     new execution contexts when using `call` or `dyncall`.
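///
/// # Example
///
/// A minimal usage sketch, assuming a compiled `program` and a `host` implementing [`AsyncHost`]
/// are available (both are built elsewhere and not shown here):
///
/// ```ignore
/// let processor = FastProcessor::new(&[Felt::new(1), Felt::new(2), Felt::new(3)]);
/// let ExecutionOutput { stack, advice, memory } = processor.execute(&program, &mut host).await?;
/// ```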
#[derive(Debug)]
pub struct FastProcessor {
    /// The stack is stored in reverse order, so that the last element is at the top of the stack.
    pub(super) stack: Box<[Felt; STACK_BUFFER_SIZE]>,
    /// The index of the top of the stack.
    stack_top_idx: usize,
    /// The index of the bottom of the stack.
    stack_bot_idx: usize,

    /// The current clock cycle.
    pub(super) clk: RowIndex,

    /// The current context ID.
    pub(super) ctx: ContextId,

    /// The free memory pointer.
    pub(super) fmp: Felt,

    /// Whether we are currently in a syscall.
    in_syscall: bool,

    /// The hash of the function that called into the current context, or `[ZERO, ZERO, ZERO,
    /// ZERO]` if we are in the first context (i.e. when `call_stack` is empty).
    pub(super) caller_hash: Word,

    /// The advice provider to be used during execution.
    pub(super) advice: AdviceProvider,

    /// A map from (context_id, word_address) to the word stored starting at that memory location.
    pub(super) memory: Memory,

    /// A map storing metadata per call to the ACE chiplet.
    pub(super) ace: Ace,

    /// The call stack is used when starting a new execution context (from a `call`, `syscall` or
    /// `dyncall`) to keep track of the information needed to return to the previous context upon
    /// return. It is a stack since calls can be nested.
    call_stack: Vec<ExecutionContextInfo>,

    /// Whether to enable debug statements and tracing.
    in_debug_mode: bool,
}

impl FastProcessor {
    // CONSTRUCTORS
    // -------------------------------------------------------------------------------------------

    /// Creates a new `FastProcessor` instance with the given stack inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new(stack_inputs: &[Felt]) -> Self {
        Self::initialize(stack_inputs, AdviceInputs::default(), false)
    }

    /// Creates a new `FastProcessor` instance with the given stack and advice inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new_with_advice_inputs(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, false)
    }

    /// Creates a new `FastProcessor` instance, set to debug mode, with the given stack
    /// and advice inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new_debug(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, true)
    }

    /// Generic constructor unifying the above public ones.
    ///
    /// The stack inputs are expected to be stored in reverse order. For example, if `stack_inputs =
    /// [1,2,3]`, then the stack will be initialized as `[3,2,1,0,0,...]`, with `3` being on
    /// top.
    fn initialize(stack_inputs: &[Felt], advice_inputs: AdviceInputs, in_debug_mode: bool) -> Self {
        assert!(stack_inputs.len() <= MIN_STACK_DEPTH);

        let stack_top_idx = INITIAL_STACK_TOP_IDX;
        let stack = {
            // Note: we use `Vec::into_boxed_slice()` here, since `Box::new([T; N])` first allocates
            // the array on the stack, and then moves it to the heap. This might cause a
            // stack overflow on some systems.
            let mut stack: Box<[Felt; STACK_BUFFER_SIZE]> =
                vec![ZERO; STACK_BUFFER_SIZE].into_boxed_slice().try_into().unwrap();
            let bottom_idx = stack_top_idx - stack_inputs.len();

            stack[bottom_idx..stack_top_idx].copy_from_slice(stack_inputs);
            stack
        };

        Self {
            advice: advice_inputs.into(),
            stack,
            stack_top_idx,
            stack_bot_idx: stack_top_idx - MIN_STACK_DEPTH,
            clk: 0_u32.into(),
            ctx: 0_u32.into(),
            fmp: Felt::new(FMP_MIN),
            in_syscall: false,
            caller_hash: EMPTY_WORD,
            memory: Memory::new(),
            call_stack: Vec::new(),
            ace: Ace::default(),
            in_debug_mode,
        }
    }

    // ACCESSORS
    // -------------------------------------------------------------------------------------------

    /// Returns the size of the stack.
    #[inline(always)]
    fn stack_size(&self) -> usize {
        self.stack_top_idx - self.stack_bot_idx
    }

    /// Returns the stack, such that the top of the stack is at the last index of the returned
    /// slice.
    pub fn stack(&self) -> &[Felt] {
        &self.stack[self.stack_bot_idx..self.stack_top_idx]
    }

    /// Returns the top 16 elements of the stack.
    pub fn stack_top(&self) -> &[Felt] {
        &self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns a mutable reference to the top 16 elements of the stack.
    pub fn stack_top_mut(&mut self) -> &mut [Felt] {
        &mut self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns the element on the stack at index `idx`.
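    ///
    /// Index 0 refers to the top of the stack.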
    #[inline(always)]
    pub fn stack_get(&self, idx: usize) -> Felt {
        self.stack[self.stack_top_idx - idx - 1]
    }

    /// Mutable variant of `stack_get()`.
    #[inline(always)]
    pub fn stack_get_mut(&mut self, idx: usize) -> &mut Felt {
        &mut self.stack[self.stack_top_idx - idx - 1]
    }

    /// Returns the word on the stack starting at index `start_idx` in "stack order".
    ///
    /// That is, for `start_idx=0` the top element of the stack will be at the last position in the
    /// word.
    ///
    /// For example, if the stack looks like this:
    ///
    /// top                                                          bottom
    ///  v                                                              v
    ///  a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p
    ///
    /// Then
    /// - `stack_get_word(0)` returns `[d, c, b, a]`,
    /// - `stack_get_word(1)` returns `[e, d, c, b]`,
    /// - etc.
    #[inline(always)]
    pub fn stack_get_word(&self, start_idx: usize) -> Word {
        // Ensure we have enough elements to form a complete word
        debug_assert!(
            start_idx + WORD_SIZE <= self.stack_depth() as usize,
            "Not enough elements on stack to read word starting at index {start_idx}"
        );

        let word_start_idx = self.stack_top_idx - start_idx - 4;
        let result: [Felt; WORD_SIZE] =
            self.stack[range(word_start_idx, WORD_SIZE)].try_into().unwrap();
        result.into()
    }

    /// Returns the number of elements on the stack in the current context.
    #[inline(always)]
    pub fn stack_depth(&self) -> u32 {
        (self.stack_top_idx - self.stack_bot_idx) as u32
    }

    // MUTATORS
    // -------------------------------------------------------------------------------------------

    /// Writes an element to the stack at the given index.
    #[inline(always)]
    pub fn stack_write(&mut self, idx: usize, element: Felt) {
        self.stack[self.stack_top_idx - idx - 1] = element
    }

    /// Writes a word to the stack starting at the given index.
    ///
    /// The index is the index of the first element of the word, and the word is written in reverse
    /// order.
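    ///
    /// For example, `stack_write_word(0, &word)` places `word[3]` on top of the stack and `word[0]`
    /// at stack index 3, making it the inverse of `stack_get_word(0)`.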
    #[inline(always)]
    pub fn stack_write_word(&mut self, start_idx: usize, word: &Word) {
        debug_assert!(start_idx < MIN_STACK_DEPTH);

        let word_start_idx = self.stack_top_idx - start_idx - 4;
        let source: [Felt; WORD_SIZE] = (*word).into();
        self.stack[range(word_start_idx, WORD_SIZE)].copy_from_slice(&source)
    }

    /// Swaps the elements at the given indices on the stack.
    #[inline(always)]
    pub fn stack_swap(&mut self, idx1: usize, idx2: usize) {
        let a = self.stack_get(idx1);
        let b = self.stack_get(idx2);
        self.stack_write(idx1, b);
        self.stack_write(idx2, a);
    }

    // EXECUTE
    // -------------------------------------------------------------------------------------------

    /// Executes the given program and returns the stack outputs as well as the advice provider.
    pub async fn execute(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<ExecutionOutput, ExecutionError> {
        self.execute_with_tracer(program, host, &mut NoopTracer).await
    }

    /// Executes the given program and returns the stack outputs, the advice provider, and
    /// information for building the trace.
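    ///
    /// A minimal call sketch (the `processor`, `program`, and `host` values are assumed to be
    /// built elsewhere):
    ///
    /// ```ignore
    /// let (output, fragment_contexts) = processor.execute_for_trace(&program, &mut host).await?;
    /// ```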
    pub async fn execute_for_trace(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<(ExecutionOutput, Vec<TraceFragmentContext>), ExecutionError> {
        let mut tracer = ExecutionTracer::default();
        let execution_output = self.execute_with_tracer(program, host, &mut tracer).await?;

        Ok((execution_output, tracer.into_fragment_contexts()))
    }

    /// Executes the given program with the provided tracer and returns the stack outputs and the
    /// advice provider.
    pub async fn execute_with_tracer(
        mut self,
        program: &Program,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<ExecutionOutput, ExecutionError> {
        let stack_outputs = self.execute_impl(program, host, tracer).await?;

        Ok(ExecutionOutput {
            stack: stack_outputs,
            advice: self.advice,
            memory: self.memory,
        })
    }

    /// Executes the given program with the provided tracer and returns the stack outputs.
    ///
    /// This function takes a `&mut self` (compared to `self` for the public execute functions) so
    /// that the processor state may be accessed after execution. It is incorrect to execute a
    /// second program using the same processor. This is mainly meant to be used in tests.
    async fn execute_impl(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<StackOutputs, ExecutionError> {
        let mut continuation_stack = ContinuationStack::new(program);
        let mut current_forest = program.mast_forest().clone();

        // Merge the program's advice map into the advice provider
        self.advice
            .extend_map(current_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, &()))?;

        while let Some(continuation) = continuation_stack.pop_continuation() {
            match continuation {
                Continuation::StartNode(node_id) => {
                    let node = current_forest.get_node_by_id(node_id).unwrap();

                    match node {
                        MastNode::Block(basic_block_node) => {
                            self.execute_basic_block_node(
                                basic_block_node,
                                node_id,
                                &current_forest,
                                host,
                                &mut continuation_stack,
                                &current_forest,
                                tracer,
                            )
                            .await?
                        },
                        MastNode::Join(join_node) => self.start_join_node(
                            join_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Split(split_node) => self.start_split_node(
                            split_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Loop(loop_node) => self.start_loop_node(
                            loop_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Call(call_node) => self.start_call_node(
                            call_node,
                            node_id,
                            program,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Dyn(_) => {
                            self.start_dyn_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                                tracer,
                            )
                            .await?
                        },
                        MastNode::External(_external_node) => {
                            self.execute_external_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                                tracer,
                            )
                            .await?
                        },
                    }
                },
                Continuation::FinishJoin(node_id) => self.finish_join_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishSplit(node_id) => self.finish_split_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishLoop(node_id) => self.finish_loop_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishCall(node_id) => self.finish_call_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishDyn(node_id) => self.finish_dyn_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::EnterForest(previous_forest) => {
                    // Restore the previous forest
                    current_forest = previous_forest;
                },
            }
        }

        StackOutputs::new(
            self.stack[self.stack_bot_idx..self.stack_top_idx]
                .iter()
                .rev()
                .copied()
                .collect(),
        )
        .map_err(|_| {
            ExecutionError::OutputStackOverflow(
                self.stack_top_idx - self.stack_bot_idx - MIN_STACK_DEPTH,
            )
        })
    }

    // DECORATOR EXECUTORS
    // -------------------------------------------------------------------------------------------

    /// Executes the decorators that should be executed before entering a node.
    fn execute_before_enter_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let node = current_forest
            .get_node_by_id(node_id)
            .expect("internal error: node id {node_id} not found in current forest");

        for &decorator_id in node.before_enter() {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the decorators that should be executed after exiting a node.
    fn execute_after_exit_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let node = current_forest
            .get_node_by_id(node_id)
            .expect("internal error: node id {node_id} not found in current forest");

        for &decorator_id in node.after_exit() {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the specified decorator.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.in_debug_mode {
                    let process = &mut self.state();
                    host.on_debug(process, options)?;
                }
            },
            Decorator::AsmOp(_assembly_op) => {
                // do nothing
            },
            Decorator::Trace(id) => {
                let process = &mut self.state();
                host.on_trace(process, *id)?;
            },
        };
        Ok(())
    }

    // HELPERS
    // -------------------------------------------------------------------------------------------

    /// Increments the clock by 1.
    #[inline(always)]
    fn increment_clk(&mut self, tracer: &mut impl Tracer) {
        self.clk += 1_u32;

        tracer.increment_clk();
    }

    async fn load_mast_forest<E>(
        &mut self,
        node_digest: Word,
        host: &mut impl AsyncHost,
        get_mast_forest_failed: impl Fn(Word, &E) -> ExecutionError,
        err_ctx: &E,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError>
    where
        E: ErrorContext,
    {
        let mast_forest = host
            .get_mast_forest(&node_digest)
            .await
            .ok_or_else(|| get_mast_forest_failed(node_digest, err_ctx))?;

        // We limit the parts of the program that can be called externally to procedure
        // roots, even though MAST doesn't have that restriction.
        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, err_ctx))?;

        // Merge the advice map of this forest into the advice provider.
        // Note that the map may be merged multiple times if a different procedure from the same
        // forest is called.
        // For now, only compiled libraries contain non-empty advice maps, so for most cases,
        // this call will be cheap.
        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, err_ctx))?;

        Ok((root_id, mast_forest))
    }

    /// Increments the stack top pointer by 1.
    ///
    /// The bottom of the stack is never affected by this operation.
    #[inline(always)]
    fn increment_stack_size(&mut self, tracer: &mut impl Tracer) {
        tracer.increment_stack_size(self);

        self.stack_top_idx += 1;
    }

    /// Decrements the stack top pointer by 1.
    ///
    /// The bottom of the stack is only decremented in cases where the stack depth would become less
    /// than 16.
    #[inline(always)]
    fn decrement_stack_size(&mut self, tracer: &mut impl Tracer) {
        if self.stack_top_idx == MIN_STACK_DEPTH {
            // We no longer have any room in the stack buffer to decrement the stack size (which
            // would cause the `stack_bot_idx` to go below 0). We therefore reset the stack to its
            // original position.
            self.reset_stack_in_buffer(INITIAL_STACK_TOP_IDX);
        }

        self.stack_top_idx -= 1;
        self.stack_bot_idx = min(self.stack_bot_idx, self.stack_top_idx - MIN_STACK_DEPTH);

        tracer.decrement_stack_size();
    }

    /// Resets the stack in the buffer to a new position, preserving the top 16 elements of the
    /// stack.
    ///
    /// # Preconditions
    /// - The stack is expected to have exactly 16 elements.
    #[inline(always)]
    fn reset_stack_in_buffer(&mut self, new_stack_top_idx: usize) {
        debug_assert_eq!(self.stack_depth(), MIN_STACK_DEPTH as u32);

        let new_stack_bot_idx = new_stack_top_idx - MIN_STACK_DEPTH;

        // Copy stack to its new position
        self.stack
            .copy_within(self.stack_bot_idx..self.stack_top_idx, new_stack_bot_idx);

        // Zero out the stack below `new_stack_bot_idx`, since this is where overflow values come
        // from, and they are guaranteed to be ZERO. We don't need to zero out anything above
        // `stack_top_idx`, since values there are never read before being written.
        self.stack[0..new_stack_bot_idx].fill(ZERO);

        // Update indices.
        self.stack_bot_idx = new_stack_bot_idx;
        self.stack_top_idx = new_stack_top_idx;
    }

    // TESTING
    // -------------------------------------------------------------------------------------------

    /// Convenience sync wrapper to [Self::execute] for testing purposes.
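    ///
    /// A sketch of how a test might call it (the `program` and `host` values are assumed to be
    /// built elsewhere):
    ///
    /// ```ignore
    /// let stack_outputs = FastProcessor::new(&[Felt::new(1)])
    ///     .execute_sync(&program, &mut host)
    ///     .expect("execution failed");
    /// ```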
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        let execution_output = rt.block_on(self.execute(program, host))?;

        Ok(execution_output.stack)
    }

    /// Convenience sync wrapper to [Self::execute_for_trace] for testing purposes.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_for_trace_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<(ExecutionOutput, Vec<TraceFragmentContext>), ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_for_trace(program, host))
    }

    /// Similar to [Self::execute_sync], but allows mutable access to the processor.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync_mut(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_impl(program, host, &mut NoopTracer))
    }
}

// EXECUTION OUTPUT
// ================================================================================================

/// The output of a program execution, containing the state of the stack, advice provider, and
/// memory at the end of the execution.
#[derive(Debug)]
pub struct ExecutionOutput {
    pub stack: StackOutputs,
    pub advice: AdviceProvider,
    pub memory: Memory,
}

// FAST PROCESS STATE
// ================================================================================================

#[derive(Debug)]
pub struct FastProcessState<'a> {
    pub(super) processor: &'a mut FastProcessor,
}

impl FastProcessor {
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Fast(FastProcessState { processor: self })
    }
}

// EXECUTION CONTEXT INFO
// ================================================================================================

/// Information about the execution context.
///
/// This struct is used to keep track of the information needed to return to the previous context
/// upon return from a `call`, `syscall` or `dyncall`.
#[derive(Debug)]
struct ExecutionContextInfo {
    /// This stores all the elements on the stack at the call site, excluding the top 16 elements.
    /// This corresponds to the overflow table in [crate::Process].
    overflow_stack: Vec<Felt>,
    ctx: ContextId,
    fn_hash: Word,
    fmp: Felt,
}