#![no_std]

#[macro_use]
extern crate alloc;

#[cfg(feature = "std")]
extern crate std;

use alloc::{sync::Arc, vec::Vec};
use core::fmt::{Display, LowerHex};

use miden_air::trace::{
    CHIPLETS_WIDTH, DECODER_TRACE_WIDTH, MIN_TRACE_LEN, RANGE_CHECK_TRACE_WIDTH, STACK_TRACE_WIDTH,
    SYS_TRACE_WIDTH,
};
pub use miden_air::{ExecutionOptions, ExecutionOptionsError, RowIndex};
pub use miden_core::{
    AssemblyOp, EMPTY_WORD, Felt, Kernel, ONE, Operation, Program, ProgramInfo, QuadExtension,
    StackInputs, StackOutputs, WORD_SIZE, Word, ZERO,
    crypto::merkle::SMT_DEPTH,
    errors::InputError,
    mast::{MastForest, MastNode, MastNodeExt, MastNodeId},
    precompile::{PrecompileRequest, PrecompileTranscriptState},
    sys_events::SystemEvent,
    utils::DeserializationError,
};
use miden_core::{
    Decorator, FieldElement,
    mast::{
        BasicBlockNode, CallNode, DynNode, ExternalNode, JoinNode, LoopNode, OpBatch, SplitNode,
    },
};
use miden_debug_types::SourceSpan;
pub use winter_prover::matrix::ColMatrix;

pub(crate) mod continuation_stack;

pub mod fast;
use fast::FastProcessState;
pub mod parallel;
pub(crate) mod processor;

mod operations;

mod system;
pub use system::ContextId;
use system::System;

#[cfg(test)]
mod test_utils;

pub(crate) mod decoder;
use decoder::Decoder;

mod stack;
use stack::Stack;

mod range;
use range::RangeChecker;

mod host;

pub use host::{
    AdviceMutation, AsyncHost, BaseHost, FutureMaybeSend, MastForestStore, MemMastForestStore,
    SyncHost,
    advice::{AdviceError, AdviceInputs, AdviceProvider},
    debug::DefaultDebugHandler,
    default::{DefaultHost, HostLibrary},
    handlers::{
        AssertError, DebugError, DebugHandler, EventError, EventHandler, EventHandlerRegistry,
        NoopEventHandler, TraceError,
    },
};

mod chiplets;
use chiplets::Chiplets;
pub use chiplets::MemoryError;

mod trace;
use trace::TraceFragment;
pub use trace::{ChipletsLengths, ExecutionTrace, NUM_RAND_ROWS, TraceLenSummary};

mod errors;
pub use errors::{ErrorContext, ErrorContextImpl, ExecutionError};

pub mod utils;

#[cfg(all(test, not(feature = "no_err_ctx")))]
mod tests;

mod debug;
pub use debug::{AsmOpInfo, VmState, VmStateIterator};

pub mod math {
    pub use miden_core::{Felt, FieldElement, StarkField};
    pub use winter_prover::math::fft;
}

pub mod crypto {
    pub use miden_core::crypto::{
        hash::{Blake3_192, Blake3_256, ElementHasher, Hasher, Poseidon2, Rpo256, Rpx256},
        merkle::{
            MerkleError, MerklePath, MerkleStore, MerkleTree, NodeIndex, PartialMerkleTree,
            SimpleSmt,
        },
        random::{RandomCoin, RpoRandomCoin, RpxRandomCoin, WinterRandomCoin},
    };
}

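/// A memory address in the VM, represented as a `u32`.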
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct MemoryAddress(u32);

impl From<u32> for MemoryAddress {
    fn from(addr: u32) -> Self {
        MemoryAddress(addr)
    }
}

impl From<MemoryAddress> for u32 {
    fn from(value: MemoryAddress) -> Self {
        value.0
    }
}

impl Display for MemoryAddress {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        Display::fmt(&self.0, f)
    }
}

impl LowerHex for MemoryAddress {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        LowerHex::fmt(&self.0, f)
    }
}

impl core::ops::Add<MemoryAddress> for MemoryAddress {
    type Output = Self;

    fn add(self, rhs: MemoryAddress) -> Self::Output {
        MemoryAddress(self.0 + rhs.0)
    }
}

impl core::ops::Add<u32> for MemoryAddress {
    type Output = Self;

    fn add(self, rhs: u32) -> Self::Output {
        MemoryAddress(self.0 + rhs)
    }
}

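/// Columns of the system component of the execution trace.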
type SysTrace = [Vec<Felt>; SYS_TRACE_WIDTH];

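/// Decoder trace columns together with the builder for the decoder's auxiliary trace columns.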
pub struct DecoderTrace {
    trace: [Vec<Felt>; DECODER_TRACE_WIDTH],
    aux_builder: decoder::AuxTraceBuilder,
}

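/// Stack trace columns.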
pub struct StackTrace {
    trace: [Vec<Felt>; STACK_TRACE_WIDTH],
}

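/// Range checker trace columns together with the builder for its auxiliary trace columns.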
pub struct RangeCheckTrace {
    trace: [Vec<Felt>; RANGE_CHECK_TRACE_WIDTH],
    aux_builder: range::AuxTraceBuilder,
}

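/// Chiplets trace columns together with the builder for their auxiliary trace columns.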
pub struct ChipletsTrace {
    trace: [Vec<Felt>; CHIPLETS_WIDTH],
    aux_builder: chiplets::AuxTraceBuilder,
}

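/// Executes the provided program against the given stack and advice inputs and returns the
/// resulting execution trace, which can then be used to prove program execution.
///
/// Returns an error if execution fails; on success, the returned trace is checked to be
/// consistent with the program's hash.
///
/// A minimal usage sketch (not part of this file; assumes a compiled `program` and a host
/// implementing [`SyncHost`], e.g. [`DefaultHost`], are already available):
///
/// ```rust,ignore
/// let mut host = DefaultHost::default();
/// let trace = execute(
///     &program,
///     StackInputs::default(),
///     AdviceInputs::default(),
///     &mut host,
///     ExecutionOptions::default(),
/// )?;
/// assert_eq!(&program.hash(), trace.program_hash());
/// ```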
#[tracing::instrument("execute_program", skip_all)]
pub fn execute(
    program: &Program,
    stack_inputs: StackInputs,
    advice_inputs: AdviceInputs,
    host: &mut impl SyncHost,
    options: ExecutionOptions,
) -> Result<ExecutionTrace, ExecutionError> {
    let mut process = Process::new(program.kernel().clone(), stack_inputs, advice_inputs, options);
    let stack_outputs = process.execute(program, host)?;
    let trace = ExecutionTrace::new(process, stack_outputs);
    assert_eq!(&program.hash(), trace.program_hash(), "inconsistent program hash");
    Ok(trace)
}

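/// Executes the provided program with the specified inputs in debug mode and returns a
/// [`VmStateIterator`] that can be used to step through the VM state at every cycle.
///
/// A usage sketch (not part of this file; assumes a compiled `program` and a [`SyncHost`] are
/// already available):
///
/// ```rust,ignore
/// let mut host = DefaultHost::default();
/// for vm_state in execute_iter(&program, StackInputs::default(), AdviceInputs::default(), &mut host) {
///     // each item describes the VM state at one clock cycle
///     println!("{vm_state:?}");
/// }
/// ```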
pub fn execute_iter(
    program: &Program,
    stack_inputs: StackInputs,
    advice_inputs: AdviceInputs,
    host: &mut impl SyncHost,
) -> VmStateIterator {
    let mut process = Process::new_debug(program.kernel().clone(), stack_inputs, advice_inputs);
    let result = process.execute(program, host);
    if result.is_ok() {
        assert_eq!(
            program.hash(),
            process.decoder.program_hash().into(),
            "inconsistent program hash"
        );
    }
    VmStateIterator::new(process, result)
}

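/// A processor which executes Miden VM programs and builds the columns of the execution trace
/// (system, decoder, stack, range checker, and chiplets) needed to prove the execution.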
#[cfg(not(any(test, feature = "testing")))]
pub struct Process {
    advice: AdviceProvider,
    system: System,
    decoder: Decoder,
    stack: Stack,
    range: RangeChecker,
    chiplets: Chiplets,
    max_cycles: u32,
    enable_tracing: bool,
    pc_transcript_state: PrecompileTranscriptState,
}

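/// Testing variant of [`Process`] with all components exposed as public fields.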
#[cfg(any(test, feature = "testing"))]
pub struct Process {
    pub advice: AdviceProvider,
    pub system: System,
    pub decoder: Decoder,
    pub stack: Stack,
    pub range: RangeChecker,
    pub chiplets: Chiplets,
    pub max_cycles: u32,
    pub enable_tracing: bool,
    pub pc_transcript_state: PrecompileTranscriptState,
    #[cfg(test)]
    pub decorator_retrieval_count: core::cell::Cell<usize>,
}

impl Process {
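    /// Creates a new process with the provided kernel, inputs, and execution options.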
    pub fn new(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        Self::initialize(kernel, stack_inputs, advice_inputs, execution_options)
    }

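    /// Creates a new process in debug mode (with both tracing and debugging enabled).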
    pub fn new_debug(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
    ) -> Self {
        Self::initialize(
            kernel,
            stack_inputs,
            advice_inputs,
            ExecutionOptions::default().with_tracing().with_debugging(true),
        )
    }

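    /// Instantiates all process components from the provided kernel, inputs, and options.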
    fn initialize(
        kernel: Kernel,
        stack: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        let in_debug_mode =
            execution_options.enable_debugging() || execution_options.enable_tracing();
        Self {
            advice: advice_inputs.into(),
            system: System::new(execution_options.expected_cycles() as usize),
            decoder: Decoder::new(in_debug_mode),
            stack: Stack::new(&stack, execution_options.expected_cycles() as usize, in_debug_mode),
            range: RangeChecker::new(),
            chiplets: Chiplets::new(kernel),
            max_cycles: execution_options.max_cycles(),
            enable_tracing: execution_options.enable_tracing(),
            pc_transcript_state: PrecompileTranscriptState::default(),
            #[cfg(test)]
            decorator_retrieval_count: core::cell::Cell::new(0),
        }
    }

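    /// Increments the decorator retrieval counter; compiled to a no-op outside of tests.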
    #[cfg(test)]
    #[inline(always)]
    fn record_decorator_retrieval(&self) {
        self.decorator_retrieval_count.set(self.decorator_retrieval_count.get() + 1);
    }

    #[cfg(not(test))]
    #[inline(always)]
    fn record_decorator_retrieval(&self) {}

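    /// Executes the provided program against this process and returns the stack outputs.
    ///
    /// Returns an error if the process has already executed a program or if execution fails.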
    pub fn execute(
        &mut self,
        program: &Program,
        host: &mut impl SyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        if self.system.clk() != 0 {
            return Err(ExecutionError::ProgramAlreadyExecuted);
        }

        self.advice
            .extend_map(program.mast_forest().advice_map())
            .map_err(|err| ExecutionError::advice_error(err, RowIndex::from(0), &()))?;

        self.execute_mast_node(program.entrypoint(), &program.mast_forest().clone(), host)?;

        self.stack.build_stack_outputs()
    }

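    /// Executes the MAST node with the specified ID from the provided forest, running any
    /// before-enter and after-exit decorators when the decoder is in debug mode.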
    fn execute_mast_node(
        &mut self,
        node_id: MastNodeId,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let node = program
            .get_node_by_id(node_id)
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id })?;

        if self.decoder.in_debug_mode() {
            self.record_decorator_retrieval();
            for &decorator_id in node.before_enter(program) {
                self.execute_decorator(&program[decorator_id], host)?;
            }
        }

        match node {
            MastNode::Block(node) => self.execute_basic_block_node(node_id, node, program, host)?,
            MastNode::Join(node) => self.execute_join_node(node, program, host)?,
            MastNode::Split(node) => self.execute_split_node(node, program, host)?,
            MastNode::Loop(node) => self.execute_loop_node(node, program, host)?,
            MastNode::Call(node) => {
                let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());
                add_error_ctx_to_external_error(
                    self.execute_call_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::Dyn(node) => {
                let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());
                add_error_ctx_to_external_error(
                    self.execute_dyn_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::External(external_node) => {
                let (root_id, mast_forest) = self.resolve_external_node(external_node, host)?;

                self.execute_mast_node(root_id, &mast_forest, host)?;
            },
        }

        if self.decoder.in_debug_mode() {
            self.record_decorator_retrieval();
            for &decorator_id in node.after_exit(program) {
                self.execute_decorator(&program[decorator_id], host)?;
            }
        }

        Ok(())
    }

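    /// Executes the specified [`JoinNode`] by executing its first child followed by its second.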
    #[inline(always)]
    fn execute_join_node(
        &mut self,
        node: &JoinNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_join_node(node, program, host)?;

        self.execute_mast_node(node.first(), program, host)?;
        self.execute_mast_node(node.second(), program, host)?;

        self.end_join_node(node, program, host)
    }

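    /// Executes the specified [`SplitNode`], taking the true branch if the condition is ONE and
    /// the false branch if it is ZERO; any other value is an error.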
    #[inline(always)]
    fn execute_split_node(
        &mut self,
        node: &SplitNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let condition = self.start_split_node(node, program, host)?;

        if condition == ONE {
            self.execute_mast_node(node.on_true(), program, host)?;
        } else if condition == ZERO {
            self.execute_mast_node(node.on_false(), program, host)?;
        } else {
            let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());
            return Err(ExecutionError::not_binary_value_if(condition, &err_ctx));
        }

        self.end_split_node(node, program, host)
    }

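    /// Executes the specified [`LoopNode`], repeating its body while the value on top of the
    /// stack is ONE; the value must be binary when the loop is entered and after each iteration.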
    #[inline(always)]
    fn execute_loop_node(
        &mut self,
        node: &LoopNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let condition = self.start_loop_node(node, program, host)?;

        if condition == ONE {
            self.execute_mast_node(node.body(), program, host)?;

            while self.stack.peek() == ONE {
                self.decoder.repeat();
                self.execute_op(Operation::Drop, program, host)?;
                self.execute_mast_node(node.body(), program, host)?;
            }

            if self.stack.peek() != ZERO {
                let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());
                return Err(ExecutionError::not_binary_value_loop(self.stack.peek(), &err_ctx));
            }

            self.end_loop_node(node, true, program, host)
        } else if condition == ZERO {
            self.end_loop_node(node, false, program, host)
        } else {
            let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());
            Err(ExecutionError::not_binary_value_loop(condition, &err_ctx))
        }
    }

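    /// Executes the specified [`CallNode`]; for syscalls, access to the callee is first checked
    /// against the kernel ROM.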
    #[inline(always)]
    fn execute_call_node(
        &mut self,
        call_node: &CallNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        if call_node.is_syscall() {
            let callee = program.get_node_by_id(call_node.callee()).ok_or_else(|| {
                ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() }
            })?;
            let err_ctx = err_ctx!(program, call_node, host, self.decoder.in_debug_mode());
            self.chiplets.kernel_rom.access_proc(callee.digest(), &err_ctx)?;
        }
        let err_ctx = err_ctx!(program, call_node, host, self.decoder.in_debug_mode());

        self.start_call_node(call_node, program, host, &err_ctx)?;
        self.execute_mast_node(call_node.callee(), program, host)?;
        self.end_call_node(call_node, program, host, &err_ctx)
    }

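    /// Executes the specified [`DynNode`], resolving the callee digest produced by the dyn or
    /// dyncall start operation to a procedure root either in the current forest or in a forest
    /// provided by the host.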
    #[inline(always)]
    fn execute_dyn_node(
        &mut self,
        node: &DynNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let err_ctx = err_ctx!(program, node, host, self.decoder.in_debug_mode());

        let callee_hash = if node.is_dyncall() {
            self.start_dyncall_node(node, &err_ctx)?
        } else {
            self.start_dyn_node(node, program, host, &err_ctx)?
        };

        match program.find_procedure_root(callee_hash) {
            Some(callee_id) => self.execute_mast_node(callee_id, program, host)?,
            None => {
                let mast_forest = host
                    .get_mast_forest(&callee_hash)
                    .ok_or_else(|| ExecutionError::dynamic_node_not_found(callee_hash, &err_ctx))?;

                let root_id = mast_forest
                    .find_procedure_root(callee_hash)
                    .ok_or(ExecutionError::malfored_mast_forest_in_host(callee_hash, &()))?;

                self.advice
                    .extend_map(mast_forest.advice_map())
                    .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

                self.execute_mast_node(root_id, &mast_forest, host)?
            },
        }

        if node.is_dyncall() {
            self.end_dyncall_node(node, program, host, &err_ctx)
        } else {
            self.end_dyn_node(node, program, host)
        }
    }

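    /// Executes the specified [`BasicBlockNode`] batch by batch, inserting a RESPAN and a Noop
    /// between batches, and running any trailing decorators when in debug mode.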
    #[inline(always)]
    fn execute_basic_block_node(
        &mut self,
        node_id: MastNodeId,
        basic_block: &BasicBlockNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_basic_block_node(basic_block, program, host)?;

        let mut op_offset = 0;

        self.execute_op_batch(basic_block, &basic_block.op_batches()[0], op_offset, program, host)?;
        op_offset += basic_block.op_batches()[0].ops().len();

        for op_batch in basic_block.op_batches().iter().skip(1) {
            self.respan(op_batch);
            self.execute_op(Operation::Noop, program, host)?;
            self.execute_op_batch(basic_block, op_batch, op_offset, program, host)?;
            op_offset += op_batch.ops().len();
        }

        self.end_basic_block_node(basic_block, program, host)?;

        if self.decoder.in_debug_mode() {
            let num_ops = basic_block.num_operations() as usize;
            self.record_decorator_retrieval();
            for decorator in program.decorators_for_op(node_id, num_ops) {
                self.execute_decorator(decorator, host)?;
            }
        }

        Ok(())
    }

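    /// Executes all operations in a single [`OpBatch`], keeping track of operation groups: when
    /// the current group's end index is reached, the decoder switches to the next group, skipping
    /// over groups occupied by immediate values.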
    #[inline(always)]
    fn execute_op_batch(
        &mut self,
        basic_block: &BasicBlockNode,
        batch: &OpBatch,
        op_offset: usize,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let end_indices = batch.end_indices();
        let mut op_idx = 0;
        let mut group_idx = 0;
        let mut next_group_idx = 1;

        let num_batch_groups = batch.num_groups().next_power_of_two();

        let node_id = basic_block
            .linked_id()
            .expect("basic block node should be linked when executing operations");

        for (i, &op) in batch.ops().iter().enumerate() {
            if self.decoder.in_debug_mode() {
                let current_op_idx = i + op_offset;
                self.record_decorator_retrieval();
                for decorator in program.decorators_for_op(node_id, current_op_idx) {
                    self.execute_decorator(decorator, host)?;
                }
            }

            let err_ctx =
                err_ctx!(program, basic_block, host, self.decoder.in_debug_mode(), i + op_offset);
            self.decoder.execute_user_op(op, op_idx);
            self.execute_op_with_error_ctx(op, program, host, &err_ctx)?;

            let has_imm = op.imm_value().is_some();
            if has_imm {
                next_group_idx += 1;
            }

            if i + 1 == end_indices[group_idx] {
                group_idx = next_group_idx;
                next_group_idx += 1;
                op_idx = 0;

                if group_idx < num_batch_groups {
                    self.decoder.start_op_group(batch.groups()[group_idx]);
                }
            } else {
                op_idx += 1;
            }
        }

        Ok(())
    }

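    /// Executes the specified [`Decorator`]: debug and assembly-op decorators are handled only in
    /// debug mode, and trace decorators only when tracing is enabled.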
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.decoder.in_debug_mode() {
                    let process = &mut self.state();
                    let clk = process.clk();
                    host.on_debug(process, options)
                        .map_err(|err| ExecutionError::DebugHandlerError { clk, err })?;
                }
            },
            Decorator::AsmOp(assembly_op) => {
                if self.decoder.in_debug_mode() {
                    self.decoder.append_asmop(self.system.clk(), assembly_op.clone());
                }
            },
            Decorator::Trace(id) => {
                if self.enable_tracing {
                    let process = &mut self.state();
                    let clk = process.clk();
                    host.on_trace(process, *id).map_err(|err| {
                        ExecutionError::TraceHandlerError { clk, trace_id: *id, err }
                    })?;
                }
            },
        };
        Ok(())
    }

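    /// Resolves an [`ExternalNode`] to a procedure root in a MAST forest provided by the host,
    /// extending the advice map with the forest's advice data. Returns an error if the host does
    /// not know the forest, if the forest does not contain the procedure, or if resolution leads
    /// to another external node (a circular reference).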
    fn resolve_external_node(
        &mut self,
        external_node: &ExternalNode,
        host: &impl SyncHost,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError> {
        let node_digest = external_node.digest();

        let mast_forest = host
            .get_mast_forest(&node_digest)
            .ok_or(ExecutionError::no_mast_forest_with_procedure(node_digest, &()))?;

        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, &()))?;

        if mast_forest[root_id].is_external() {
            return Err(ExecutionError::CircularExternalNode(node_digest));
        }

        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

        Ok((root_id, mast_forest))
    }

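    /// Returns a reference to the kernel against which this process was instantiated.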
    pub const fn kernel(&self) -> &Kernel {
        self.chiplets.kernel_rom.kernel()
    }

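    /// Consumes the process and returns its trace-generating components.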
    pub fn into_parts(
        self,
    ) -> (System, Decoder, Stack, RangeChecker, Chiplets, PrecompileTranscriptState) {
        (
            self.system,
            self.decoder,
            self.stack,
            self.range,
            self.chiplets,
            self.pc_transcript_state,
        )
    }
}

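/// Borrowed view into the components of a [`Process`], exposed through [`ProcessState`].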
#[derive(Debug)]
pub struct SlowProcessState<'a> {
    advice: &'a mut AdviceProvider,
    system: &'a System,
    stack: &'a Stack,
    chiplets: &'a Chiplets,
}

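/// A view into the state of a running processor, abstracting over the slow (proving) processor,
/// the fast processor, and a no-op placeholder; hosts and event handlers use it to inspect the
/// stack, memory, and advice provider at the current clock cycle.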
#[derive(Debug)]
pub enum ProcessState<'a> {
    Slow(SlowProcessState<'a>),
    Fast(FastProcessState<'a>),
    Noop(()),
}

impl Process {
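    /// Returns a [`ProcessState`] view over this (slow) process.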
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Slow(SlowProcessState {
            advice: &mut self.advice,
            system: &self.system,
            stack: &self.stack,
            chiplets: &self.chiplets,
        })
    }
}

impl<'a> ProcessState<'a> {
    #[inline(always)]
    pub fn advice_provider(&self) -> &AdviceProvider {
        match self {
            ProcessState::Slow(state) => state.advice,
            ProcessState::Fast(state) => &state.processor.advice,
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

    #[inline(always)]
    pub fn advice_provider_mut(&mut self) -> &mut AdviceProvider {
        match self {
            ProcessState::Slow(state) => state.advice,
            ProcessState::Fast(state) => &mut state.processor.advice,
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

    #[inline(always)]
    pub fn clk(&self) -> RowIndex {
        match self {
            ProcessState::Slow(state) => state.system.clk(),
            ProcessState::Fast(state) => state.processor.clk,
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

    #[inline(always)]
    pub fn ctx(&self) -> ContextId {
        match self {
            ProcessState::Slow(state) => state.system.ctx(),
            ProcessState::Fast(state) => state.processor.ctx,
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

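    /// Returns the element located at the specified position on the stack at the current clock
    /// cycle.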
    #[inline(always)]
    pub fn get_stack_item(&self, pos: usize) -> Felt {
        match self {
            ProcessState::Slow(state) => state.stack.get(pos),
            ProcessState::Fast(state) => state.processor.stack_get(pos),
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

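    /// Returns the word at the specified stack position with its elements in big-endian order;
    /// see also [`Self::get_stack_word_le`].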
    #[inline(always)]
    pub fn get_stack_word_be(&self, start_idx: usize) -> Word {
        match self {
            ProcessState::Slow(state) => state.stack.get_word(start_idx),
            ProcessState::Fast(state) => state.processor.stack_get_word(start_idx),
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

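    /// Returns the same word as [`Self::get_stack_word_be`] but with its elements in reversed
    /// (little-endian) order.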
    #[inline(always)]
    pub fn get_stack_word_le(&self, start_idx: usize) -> Word {
        let mut word = self.get_stack_word_be(start_idx);
        word.reverse();
        word
    }

    #[deprecated(
        since = "0.19.0",
        note = "Use `get_stack_word_be()` or `get_stack_word_le()` to make endianness explicit"
    )]
    #[inline(always)]
    pub fn get_stack_word(&self, start_idx: usize) -> Word {
        self.get_stack_word_be(start_idx)
    }

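    /// Returns the entire state of the stack at the current clock cycle.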
    #[inline(always)]
    pub fn get_stack_state(&self) -> Vec<Felt> {
        match self {
            ProcessState::Slow(state) => state.stack.get_state_at(state.system.clk()),
            ProcessState::Fast(state) => state.processor.stack().iter().rev().copied().collect(),
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

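    /// Returns the element stored in memory at the specified context and address, or `None` if
    /// nothing has been written to that address.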
    #[inline(always)]
    pub fn get_mem_value(&self, ctx: ContextId, addr: u32) -> Option<Felt> {
        match self {
            ProcessState::Slow(state) => state.chiplets.memory.get_value(ctx, addr),
            ProcessState::Fast(state) => state.processor.memory.read_element_impl(ctx, addr),
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

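    /// Returns the word stored in memory at the specified context and address, or `None` if
    /// nothing has been written there; returns an error if the word cannot be read (e.g., the
    /// address is not word-aligned).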
    #[inline(always)]
    pub fn get_mem_word(&self, ctx: ContextId, addr: u32) -> Result<Option<Word>, MemoryError> {
        match self {
            ProcessState::Slow(state) => state.chiplets.memory.get_word(ctx, addr),
            ProcessState::Fast(state) => {
                state.processor.memory.read_word_impl(ctx, addr, None, &())
            },
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }

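    /// Reads a start and end address from the specified stack positions and returns them as a
    /// memory address range, checking that both fit into a `u32` and that the start does not
    /// exceed the end.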
    pub fn get_mem_addr_range(
        &self,
        start_idx: usize,
        end_idx: usize,
    ) -> Result<core::ops::Range<u32>, MemoryError> {
        let start_addr = self.get_stack_item(start_idx).as_int();
        let end_addr = self.get_stack_item(end_idx).as_int();

        if start_addr > u32::MAX as u64 {
            return Err(MemoryError::address_out_of_bounds(start_addr, &()));
        }
        if end_addr > u32::MAX as u64 {
            return Err(MemoryError::address_out_of_bounds(end_addr, &()));
        }

        if start_addr > end_addr {
            return Err(MemoryError::InvalidMemoryRange { start_addr, end_addr });
        }

        Ok(start_addr as u32..end_addr as u32)
    }

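    /// Returns the entire memory state for the specified execution context at the current clock
    /// cycle, as a list of (address, value) pairs.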
    #[inline(always)]
    pub fn get_mem_state(&self, ctx: ContextId) -> Vec<(MemoryAddress, Felt)> {
        match self {
            ProcessState::Slow(state) => {
                state.chiplets.memory.get_state_at(ctx, state.system.clk())
            },
            ProcessState::Fast(state) => state.processor.memory.get_memory_state(ctx),
            ProcessState::Noop(()) => panic!("attempted to access Noop process state"),
        }
    }
}

impl<'a> From<&'a mut Process> for ProcessState<'a> {
    fn from(process: &'a mut Process) -> Self {
        process.state()
    }
}

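/// Attaches the given error context to errors raised while resolving external (or dynamically
/// called) procedures, but only when the error does not already carry a known source location.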
pub(crate) fn add_error_ctx_to_external_error(
    result: Result<(), ExecutionError>,
    err_ctx: impl ErrorContext,
) -> Result<(), ExecutionError> {
    match result {
        Ok(_) => Ok(()),
        Err(err) => match err {
            ExecutionError::NoMastForestWithProcedure { label, source_file: _, root_digest }
            | ExecutionError::MalformedMastForestInHost { label, source_file: _, root_digest } => {
                if label == SourceSpan::UNKNOWN {
                    let err_with_ctx =
                        ExecutionError::no_mast_forest_with_procedure(root_digest, &err_ctx);
                    Err(err_with_ctx)
                } else {
                    Err(err)
                }
            },

            _ => {
                Err(err)
            },
        },
    }
}