miden_processor/fast/mod.rs
use alloc::{boxed::Box, sync::Arc, vec::Vec};
use core::cmp::min;

use memory::Memory;
use miden_air::{Felt, RowIndex};
use miden_core::{
    Decorator, EMPTY_WORD, Program, StackOutputs, WORD_SIZE, Word, ZERO,
    mast::{MastForest, MastNode, MastNodeExt, MastNodeId},
    precompile::PrecompileTranscript,
    stack::MIN_STACK_DEPTH,
    utils::range,
};

use crate::{
    AdviceInputs, AdviceProvider, AsyncHost, ContextId, ErrorContext, ExecutionError, ProcessState,
    chiplets::Ace,
    continuation_stack::{Continuation, ContinuationStack},
    fast::execution_tracer::{ExecutionTracer, TraceGenerationContext},
};

pub mod execution_tracer;
mod memory;
mod operation;
pub mod trace_state;
mod tracer;
pub use tracer::{NoopTracer, Tracer};

mod basic_block;
mod call_and_dyn;
mod external;
mod join;
mod r#loop;
mod split;

#[cfg(test)]
mod tests;

/// The size of the stack buffer.
///
/// Note: This value is much larger than it needs to be for the majority of programs. However, some
/// existing programs need it (e.g. `std::math::secp256k1::group::gen_mul`), so we're forced to
/// push it up. At a value this high, we're starting to see some performance degradation on
/// benchmarks. For example, the blake3 benchmark went from 285 MHz to 250 MHz (~10% degradation).
/// Perhaps a better solution would be to make this value much smaller (~1000), and then fall back
/// to a `Vec` if the stack overflows.
const STACK_BUFFER_SIZE: usize = 6850;

/// The initial position of the top of the stack in the stack buffer.
///
/// We place this value close to 0 because if a program hits the limit, it's much more likely to
/// hit the upper bound than the lower bound, since hitting the lower bound only occurs when you
/// drop 0's that were generated automatically to keep the stack depth at 16. In practice, if this
/// occurs, it is most likely a bug.
const INITIAL_STACK_TOP_IDX: usize = 250;

/// A fast processor which doesn't generate any trace.
///
/// This processor is designed to be as fast as possible. Hence, it only keeps track of the current
/// state of the processor (i.e. the stack, current clock cycle, current memory context, and free
/// memory pointer).
///
/// # Stack Management
/// A few key points about how the stack was designed for maximum performance:
///
/// - The stack has a fixed buffer size defined by `STACK_BUFFER_SIZE`.
///   - This was observed to increase performance by at least 2x compared to using a `Vec` with
///     `push()` & `pop()`.
/// - We track the stack top and bottom using indices `stack_top_idx` and `stack_bot_idx`,
///   respectively.
/// - Since we are using a fixed-size buffer, we need to ensure that stack buffer accesses are not
///   out of bounds. Naively, we could check for this on every access. However, every operation
///   alters the stack depth by a predetermined amount, allowing us to precisely determine the
///   minimum number of operations required to reach a stack buffer boundary, whether at the top or
///   bottom.
///   - For example, if the stack top is 10 elements away from the top boundary, and the stack
///     bottom is 15 elements away from the bottom boundary, then we can safely execute 10
///     operations that modify the stack depth with no bounds check (see the sketch at the end of
///     this comment).
/// - When switching contexts (e.g., during a call or syscall), all elements past the first 16 are
///   stored in an `ExecutionContextInfo` struct, and the stack is truncated to 16 elements. This
///   state is restored when returning from the call or syscall.
///
/// # Clock Cycle Management
/// - The clock cycle (`clk`) is managed in the same way as in `Process`. That is, it is
///   incremented by 1 for every row that `Process` adds to the main trace.
///   - It is important to do so because the clock cycle is used to determine the context ID for
///     new execution contexts when using `call` or `dyncall`.
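///
/// For example, the bounds-check budget described under "Stack Management" can be derived from
/// the two indices (a sketch of the reasoning only, not code from this module):
///
/// ```text
/// ops_before_bounds_check = min(
///     STACK_BUFFER_SIZE - stack_top_idx, // distance to the top boundary
///     stack_bot_idx,                     // distance to the bottom boundary
/// );
/// ```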
#[derive(Debug)]
pub struct FastProcessor {
    /// The stack is stored in reverse order, so that the last element is at the top of the stack.
    pub(super) stack: Box<[Felt; STACK_BUFFER_SIZE]>,
    /// The index of the top of the stack.
    stack_top_idx: usize,
    /// The index of the bottom of the stack.
    stack_bot_idx: usize,

    /// The current clock cycle.
    pub(super) clk: RowIndex,

    /// The current context ID.
    pub(super) ctx: ContextId,

    /// The hash of the function that called into the current context, or `[ZERO, ZERO, ZERO,
    /// ZERO]` if we are in the first context (i.e. when `call_stack` is empty).
    pub(super) caller_hash: Word,

    /// The advice provider to be used during execution.
    pub(super) advice: AdviceProvider,

    /// A map from (context_id, word_address) to the word stored starting at that memory location.
    pub(super) memory: Memory,

    /// A map storing metadata per call to the ACE chiplet.
    pub(super) ace: Ace,

    /// The call stack is used when starting a new execution context (from a `call`, `syscall` or
    /// `dyncall`) to keep track of the information needed to return to the previous context upon
    /// return. It is a stack since calls can be nested.
    call_stack: Vec<ExecutionContextInfo>,

    /// Whether to enable debug statements and tracing.
    in_debug_mode: bool,

    /// Transcript used to record commitments via the `log_precompile` instruction (implemented via
    /// RPO sponge).
    pc_transcript: PrecompileTranscript,
}

impl FastProcessor {
    // CONSTRUCTORS
    // -------------------------------------------------------------------------------------------

    /// Creates a new `FastProcessor` instance with the given stack inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new(stack_inputs: &[Felt]) -> Self {
        Self::initialize(stack_inputs, AdviceInputs::default(), false)
    }

    /// Creates a new `FastProcessor` instance with the given stack and advice inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new_with_advice_inputs(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, false)
    }

    /// Creates a new `FastProcessor` instance, set to debug mode, with the given stack
    /// and advice inputs.
    ///
    /// # Panics
    /// - Panics if the length of `stack_inputs` is greater than `MIN_STACK_DEPTH`.
    pub fn new_debug(stack_inputs: &[Felt], advice_inputs: AdviceInputs) -> Self {
        Self::initialize(stack_inputs, advice_inputs, true)
    }

    /// Generic constructor unifying the above public ones.
    ///
    /// The stack inputs are expected to be stored in reverse order. For example, if
    /// `stack_inputs = [1,2,3]`, then the stack will be initialized as `[3,2,1,0,0,...]`, with
    /// `3` being on top.
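    ///
    /// For illustration (a sketch, not compiled as a doc-test; `Felt::new` is assumed only for
    /// constructing example values):
    ///
    /// ```ignore
    /// let processor = FastProcessor::new(&[Felt::new(1), Felt::new(2), Felt::new(3)]);
    /// // The last input ends up on top of the stack.
    /// assert_eq!(processor.stack_get(0), Felt::new(3));
    /// ```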
    fn initialize(stack_inputs: &[Felt], advice_inputs: AdviceInputs, in_debug_mode: bool) -> Self {
        assert!(stack_inputs.len() <= MIN_STACK_DEPTH);

        let stack_top_idx = INITIAL_STACK_TOP_IDX;
        let stack = {
            // Note: we use `Vec::into_boxed_slice()` here, since `Box::new([T; N])` first allocates
            // the array on the stack, and then moves it to the heap. This might cause a
            // stack overflow on some systems.
            let mut stack: Box<[Felt; STACK_BUFFER_SIZE]> =
                vec![ZERO; STACK_BUFFER_SIZE].into_boxed_slice().try_into().unwrap();
            let bottom_idx = stack_top_idx - stack_inputs.len();

            stack[bottom_idx..stack_top_idx].copy_from_slice(stack_inputs);
            stack
        };

        Self {
            advice: advice_inputs.into(),
            stack,
            stack_top_idx,
            stack_bot_idx: stack_top_idx - MIN_STACK_DEPTH,
            clk: 0_u32.into(),
            ctx: 0_u32.into(),
            caller_hash: EMPTY_WORD,
            memory: Memory::new(),
            call_stack: Vec::new(),
            ace: Ace::default(),
            in_debug_mode,
            pc_transcript: PrecompileTranscript::new(),
        }
    }

    // ACCESSORS
    // -------------------------------------------------------------------------------------------

    /// Returns the size of the stack.
    #[inline(always)]
    fn stack_size(&self) -> usize {
        self.stack_top_idx - self.stack_bot_idx
    }

    /// Returns the stack, such that the top of the stack is at the last index of the returned
    /// slice.
    pub fn stack(&self) -> &[Felt] {
        &self.stack[self.stack_bot_idx..self.stack_top_idx]
    }

    /// Returns the top 16 elements of the stack.
    pub fn stack_top(&self) -> &[Felt] {
        &self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns a mutable reference to the top 16 elements of the stack.
    pub fn stack_top_mut(&mut self) -> &mut [Felt] {
        &mut self.stack[self.stack_top_idx - MIN_STACK_DEPTH..self.stack_top_idx]
    }

    /// Returns the element on the stack at index `idx`.
    #[inline(always)]
    pub fn stack_get(&self, idx: usize) -> Felt {
        self.stack[self.stack_top_idx - idx - 1]
    }

    /// Mutable variant of `stack_get()`.
    #[inline(always)]
    pub fn stack_get_mut(&mut self, idx: usize) -> &mut Felt {
        &mut self.stack[self.stack_top_idx - idx - 1]
    }

    /// Returns the word on the stack starting at index `start_idx` in "stack order".
    ///
    /// That is, for `start_idx=0` the top element of the stack will be at the last position in the
    /// word.
    ///
    /// For example, if the stack looks like this:
    ///
    /// top                                                          bottom
    ///  v                                                             v
    ///  a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p
    ///
    /// Then
    /// - `stack_get_word(0)` returns `[d, c, b, a]`,
    /// - `stack_get_word(1)` returns `[e, d, c, b]`,
    /// - etc.
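    ///
    /// Illustrative sketch (not compiled as a doc-test; `a`..`d` stand for the `Felt` values in
    /// the diagram above):
    ///
    /// ```ignore
    /// let word = processor.stack_get_word(0);
    /// assert_eq!(word, Word::from([d, c, b, a]));
    /// ```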
    #[inline(always)]
    pub fn stack_get_word(&self, start_idx: usize) -> Word {
        // Ensure we have enough elements to form a complete word
        debug_assert!(
            start_idx + WORD_SIZE <= self.stack_depth() as usize,
            "Not enough elements on stack to read word starting at index {start_idx}"
        );

        let word_start_idx = self.stack_top_idx - start_idx - 4;
        let result: [Felt; WORD_SIZE] =
            self.stack[range(word_start_idx, WORD_SIZE)].try_into().unwrap();
        result.into()
    }

    /// Returns the number of elements on the stack in the current context.
    #[inline(always)]
    pub fn stack_depth(&self) -> u32 {
        (self.stack_top_idx - self.stack_bot_idx) as u32
    }

    // MUTATORS
    // -------------------------------------------------------------------------------------------

    /// Writes an element to the stack at the given index.
    #[inline(always)]
    pub fn stack_write(&mut self, idx: usize, element: Felt) {
        self.stack[self.stack_top_idx - idx - 1] = element
    }

    /// Writes a word to the stack starting at the given index.
    ///
    /// `start_idx` is interpreted the same way as in `stack_get_word()`: for `start_idx = 0`, the
    /// word's last element is written to the top of the stack.
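    ///
    /// Illustrative sketch (not compiled as a doc-test):
    ///
    /// ```ignore
    /// // Writing back the word read at the same index leaves the stack unchanged.
    /// let word = processor.stack_get_word(0);
    /// processor.stack_write_word(0, &word);
    /// ```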
    #[inline(always)]
    pub fn stack_write_word(&mut self, start_idx: usize, word: &Word) {
        debug_assert!(start_idx < MIN_STACK_DEPTH);

        let word_start_idx = self.stack_top_idx - start_idx - 4;
        let source: [Felt; WORD_SIZE] = (*word).into();
        self.stack[range(word_start_idx, WORD_SIZE)].copy_from_slice(&source)
    }

    /// Swaps the elements at the given indices on the stack.
    #[inline(always)]
    pub fn stack_swap(&mut self, idx1: usize, idx2: usize) {
        let a = self.stack_get(idx1);
        let b = self.stack_get(idx2);
        self.stack_write(idx1, b);
        self.stack_write(idx2, a);
    }

    // EXECUTE
    // -------------------------------------------------------------------------------------------

    /// Executes the given program and returns the execution output (stack outputs, advice
    /// provider, memory, and final precompile transcript).
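    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doc-test); `program` is assumed to be a compiled
    /// [`Program`] and `host` some [`AsyncHost`] implementation supplied by the caller:
    ///
    /// ```ignore
    /// let processor = FastProcessor::new(&stack_inputs);
    /// let output = processor.execute(&program, &mut host).await?;
    /// let stack_outputs = output.stack;
    /// ```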
    pub async fn execute(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<ExecutionOutput, ExecutionError> {
        self.execute_with_tracer(program, host, &mut NoopTracer).await
    }

    /// Executes the given program and returns the execution output along with the context
    /// necessary to build the trace.
    pub async fn execute_for_trace(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
        fragment_size: usize,
    ) -> Result<(ExecutionOutput, TraceGenerationContext), ExecutionError> {
        let mut tracer = ExecutionTracer::new(fragment_size);
        let execution_output = self.execute_with_tracer(program, host, &mut tracer).await?;

        // Pass the final precompile transcript from the execution output to the trace generation
        // context.
        let context = tracer.into_trace_generation_context(execution_output.final_pc_transcript);

        Ok((execution_output, context))
    }

    /// Executes the given program with the provided tracer and returns the execution output.
    pub async fn execute_with_tracer(
        mut self,
        program: &Program,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<ExecutionOutput, ExecutionError> {
        let stack_outputs = self.execute_impl(program, host, tracer).await?;

        Ok(ExecutionOutput {
            stack: stack_outputs,
            advice: self.advice,
            memory: self.memory,
            final_pc_transcript: self.pc_transcript,
        })
    }

    /// Executes the given program with the provided tracer and returns the stack outputs.
    ///
    /// This function takes `&mut self` (compared to `self` for the public execute functions) so
    /// that the processor state may be accessed after execution. It is incorrect to execute a
    /// second program using the same processor. This is mainly meant to be used in tests.
    async fn execute_impl(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
        tracer: &mut impl Tracer,
    ) -> Result<StackOutputs, ExecutionError> {
        let mut continuation_stack = ContinuationStack::new(program);
        let mut current_forest = program.mast_forest().clone();

        // Merge the program's advice map into the advice provider
        self.advice
            .extend_map(current_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, &()))?;

        while let Some(continuation) = continuation_stack.pop_continuation() {
            match continuation {
                Continuation::StartNode(node_id) => {
                    let node = current_forest.get_node_by_id(node_id).unwrap();

                    match node {
                        MastNode::Block(basic_block_node) => {
                            self.execute_basic_block_node(
                                basic_block_node,
                                node_id,
                                &current_forest,
                                host,
                                &mut continuation_stack,
                                &current_forest,
                                tracer,
                            )
                            .await?
                        },
                        MastNode::Join(join_node) => self.start_join_node(
                            join_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Split(split_node) => self.start_split_node(
                            split_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Loop(loop_node) => self.start_loop_node(
                            loop_node,
                            node_id,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Call(call_node) => self.start_call_node(
                            call_node,
                            node_id,
                            program,
                            &current_forest,
                            &mut continuation_stack,
                            host,
                            tracer,
                        )?,
                        MastNode::Dyn(_) => {
                            self.start_dyn_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                                tracer,
                            )
                            .await?
                        },
                        MastNode::External(_external_node) => {
                            self.execute_external_node(
                                node_id,
                                &mut current_forest,
                                &mut continuation_stack,
                                host,
                                tracer,
                            )
                            .await?
                        },
                    }
                },
                Continuation::FinishJoin(node_id) => self.finish_join_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishSplit(node_id) => self.finish_split_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishLoop(node_id) => self.finish_loop_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishCall(node_id) => self.finish_call_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishDyn(node_id) => self.finish_dyn_node(
                    node_id,
                    &current_forest,
                    &mut continuation_stack,
                    host,
                    tracer,
                )?,
                Continuation::FinishExternal(node_id) => {
                    // Execute after_exit decorators when returning from an external node.
                    // Note: current_forest should already be restored by the EnterForest
                    // continuation.
                    self.execute_after_exit_decorators(node_id, &current_forest, host)?;
                },
                Continuation::EnterForest(previous_forest) => {
                    // Restore the previous forest.
                    current_forest = previous_forest;
                },
            }
        }

        StackOutputs::new(
            self.stack[self.stack_bot_idx..self.stack_top_idx]
                .iter()
                .rev()
                .copied()
                .collect(),
        )
        .map_err(|_| {
            ExecutionError::OutputStackOverflow(
                self.stack_top_idx - self.stack_bot_idx - MIN_STACK_DEPTH,
            )
        })
    }

    // DECORATOR EXECUTORS
    // -------------------------------------------------------------------------------------------

    /// Executes the decorators that should be executed before entering a node.
    fn execute_before_enter_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let node = current_forest
            .get_node_by_id(node_id)
            .expect("internal error: node id {node_id} not found in current forest");

        for &decorator_id in node.before_enter() {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the decorators that should be executed after exiting a node.
    fn execute_after_exit_decorators(
        &mut self,
        node_id: MastNodeId,
        current_forest: &MastForest,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        let node = current_forest
            .get_node_by_id(node_id)
            .expect("internal error: node id {node_id} not found in current forest");

        for &decorator_id in node.after_exit() {
            self.execute_decorator(&current_forest[decorator_id], host)?;
        }

        Ok(())
    }

    /// Executes the specified decorator.
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl AsyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.in_debug_mode {
                    let process = &mut self.state();
                    host.on_debug(process, options)?;
                }
            },
            Decorator::AsmOp(_assembly_op) => {
                // do nothing
            },
            Decorator::Trace(id) => {
                let process = &mut self.state();
                host.on_trace(process, *id)?;
            },
        };
        Ok(())
    }

    // HELPERS
    // -------------------------------------------------------------------------------------------

    /// Increments the clock by 1.
    #[inline(always)]
    fn increment_clk(&mut self, tracer: &mut impl Tracer) {
        self.clk += 1_u32;

        tracer.increment_clk();
    }

    async fn load_mast_forest<E>(
        &mut self,
        node_digest: Word,
        host: &mut impl AsyncHost,
        get_mast_forest_failed: impl Fn(Word, &E) -> ExecutionError,
        err_ctx: &E,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError>
    where
        E: ErrorContext,
    {
        let mast_forest = host
            .get_mast_forest(&node_digest)
            .await
            .ok_or_else(|| get_mast_forest_failed(node_digest, err_ctx))?;

        // We limit the parts of the program that can be called externally to procedure
        // roots, even though MAST doesn't have that restriction.
        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, err_ctx))?;

        // Merge the advice map of this forest into the advice provider.
        // Note that the map may be merged multiple times if a different procedure from the same
        // forest is called.
        // For now, only compiled libraries contain non-empty advice maps, so for most cases,
        // this call will be cheap.
        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.clk, err_ctx))?;

        Ok((root_id, mast_forest))
    }

    /// Increments the stack top pointer by 1.
    ///
    /// The bottom of the stack is never affected by this operation.
    #[inline(always)]
    fn increment_stack_size(&mut self, tracer: &mut impl Tracer) {
        tracer.increment_stack_size(self);

        self.stack_top_idx += 1;
    }

    /// Decrements the stack top pointer by 1.
    ///
    /// The bottom of the stack is only decremented in cases where the stack depth would otherwise
    /// become less than 16.
    #[inline(always)]
    fn decrement_stack_size(&mut self, tracer: &mut impl Tracer) {
        if self.stack_top_idx == MIN_STACK_DEPTH {
            // We no longer have any room in the stack buffer to decrement the stack size (which
            // would cause the `stack_bot_idx` to go below 0). We therefore reset the stack to its
            // original position.
            self.reset_stack_in_buffer(INITIAL_STACK_TOP_IDX);
        }

        self.stack_top_idx -= 1;
        self.stack_bot_idx = min(self.stack_bot_idx, self.stack_top_idx - MIN_STACK_DEPTH);

        tracer.decrement_stack_size();
    }

    /// Resets the stack in the buffer to a new position, preserving the top 16 elements of the
    /// stack.
    ///
    /// # Preconditions
    /// - The stack is expected to have exactly 16 elements.
    #[inline(always)]
    fn reset_stack_in_buffer(&mut self, new_stack_top_idx: usize) {
        debug_assert_eq!(self.stack_depth(), MIN_STACK_DEPTH as u32);

        let new_stack_bot_idx = new_stack_top_idx - MIN_STACK_DEPTH;

        // Copy the stack to its new position.
        self.stack
            .copy_within(self.stack_bot_idx..self.stack_top_idx, new_stack_bot_idx);

        // Zero out the stack below the new `stack_bot_idx`, since this is where overflow values
        // come from, and they are guaranteed to be ZERO. We don't need to zero out above
        // `stack_top_idx`, since values there are never read before being written.
        self.stack[0..new_stack_bot_idx].fill(ZERO);

        // Update indices.
        self.stack_bot_idx = new_stack_bot_idx;
        self.stack_top_idx = new_stack_top_idx;
    }

    // TESTING
    // -------------------------------------------------------------------------------------------

    /// Convenience synchronous wrapper around [Self::execute] for testing purposes.
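    ///
    /// Usage sketch (testing only; not compiled as a doc-test):
    ///
    /// ```ignore
    /// let stack_outputs = FastProcessor::new(&stack_inputs).execute_sync(&program, &mut host)?;
    /// ```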
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        let execution_output = rt.block_on(self.execute(program, host))?;

        Ok(execution_output.stack)
    }

    /// Convenience synchronous wrapper around [Self::execute_for_trace] for testing purposes.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_for_trace_sync(
        self,
        program: &Program,
        host: &mut impl AsyncHost,
        fragment_size: usize,
    ) -> Result<(ExecutionOutput, TraceGenerationContext), ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_for_trace(program, host, fragment_size))
    }

    /// Similar to [Self::execute_sync], but allows mutable access to the processor.
    #[cfg(any(test, feature = "testing"))]
    pub fn execute_sync_mut(
        &mut self,
        program: &Program,
        host: &mut impl AsyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        // Create a new Tokio runtime and block on the async execution
        let rt = tokio::runtime::Builder::new_current_thread().build().unwrap();

        rt.block_on(self.execute_impl(program, host, &mut NoopTracer))
    }
}

// EXECUTION OUTPUT
// ===============================================================================================

/// The output of a program execution, containing the state of the stack, advice provider,
/// memory, and final precompile transcript at the end of execution.
#[derive(Debug)]
pub struct ExecutionOutput {
    pub stack: StackOutputs,
    pub advice: AdviceProvider,
    pub memory: Memory,
    pub final_pc_transcript: PrecompileTranscript,
}

// FAST PROCESS STATE
// ===============================================================================================

#[derive(Debug)]
pub struct FastProcessState<'a> {
    pub(super) processor: &'a mut FastProcessor,
}

impl FastProcessor {
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Fast(FastProcessState { processor: self })
    }
}

// EXECUTION CONTEXT INFO
// ===============================================================================================

/// Information about the execution context.
///
/// This struct is used to keep track of the information needed to return to the previous context
/// upon return from a `call`, `syscall` or `dyncall`.
#[derive(Debug)]
struct ExecutionContextInfo {
    /// This stores all the elements on the stack at the call site, excluding the top 16 elements.
    /// This corresponds to the overflow table in [crate::Process].
    overflow_stack: Vec<Felt>,
    ctx: ContextId,
    fn_hash: Word,
}