#![no_std]

#[macro_use]
extern crate alloc;

#[cfg(feature = "std")]
extern crate std;

use alloc::{sync::Arc, vec::Vec};
use core::fmt::{Display, LowerHex};

use miden_air::trace::{
    CHIPLETS_WIDTH, DECODER_TRACE_WIDTH, MIN_TRACE_LEN, RANGE_CHECK_TRACE_WIDTH, STACK_TRACE_WIDTH,
    SYS_TRACE_WIDTH,
};
pub use miden_air::{ExecutionOptions, ExecutionOptionsError, RowIndex};
pub use miden_core::{
    AssemblyOp, EMPTY_WORD, Felt, Kernel, ONE, Operation, Program, ProgramInfo, QuadExtension,
    StackInputs, StackOutputs, Word, ZERO,
    crypto::merkle::SMT_DEPTH,
    errors::InputError,
    mast::{MastForest, MastNode, MastNodeId},
    sys_events::SystemEvent,
    utils::{DeserializationError, collections::KvMap},
};
use miden_core::{
    Decorator, DecoratorIterator, FieldElement, WORD_SIZE,
    mast::{
        BasicBlockNode, CallNode, DynNode, ExternalNode, JoinNode, LoopNode, OP_GROUP_SIZE,
        OpBatch, SplitNode,
    },
};
use miden_debug_types::SourceSpan;
pub use winter_prover::matrix::ColMatrix;

pub(crate) mod continuation_stack;

pub mod fast;
use fast::FastProcessState;

mod operations;

mod system;
use system::System;
pub use system::{ContextId, FMP_MIN, SYSCALL_FMP_MIN};

mod decoder;
use decoder::Decoder;

mod stack;
use stack::Stack;

mod range;
use range::RangeChecker;

mod host;
pub use host::{
    AdviceMutation, AsyncHost, BaseHost, FutureMaybeSend, MastForestStore, MemMastForestStore,
    SyncHost,
    advice::{AdviceError, AdviceInputs, AdviceProvider},
    default::{DefaultDebugHandler, DefaultHost, HostLibrary},
    handlers::{DebugHandler, EventError, EventHandler, EventHandlerRegistry},
};

mod chiplets;
use chiplets::Chiplets;
pub use chiplets::MemoryError;

mod trace;
use trace::TraceFragment;
pub use trace::{ChipletsLengths, ExecutionTrace, NUM_RAND_ROWS, TraceLenSummary};

mod errors;
pub use errors::{ErrorContext, ErrorContextImpl, ExecutionError};

pub mod utils;

#[cfg(test)]
mod tests;

mod debug;
pub use debug::{AsmOpInfo, VmState, VmStateIterator};

pub mod math {
    pub use miden_core::{Felt, FieldElement, StarkField};
    pub use winter_prover::math::fft;
}

pub mod crypto {
    pub use miden_core::crypto::{
        hash::{Blake3_192, Blake3_256, ElementHasher, Hasher, Rpo256, Rpx256},
        merkle::{
            MerkleError, MerklePath, MerkleStore, MerkleTree, NodeIndex, PartialMerkleTree,
            SimpleSmt,
        },
        random::{RandomCoin, RpoRandomCoin, RpxRandomCoin, WinterRandomCoin},
    };
}
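
/// A wrapper around a `u32` value that represents an address in the VM's memory.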
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct MemoryAddress(u32);

impl From<u32> for MemoryAddress {
    fn from(addr: u32) -> Self {
        MemoryAddress(addr)
    }
}

impl From<MemoryAddress> for u32 {
    fn from(value: MemoryAddress) -> Self {
        value.0
    }
}

impl Display for MemoryAddress {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        Display::fmt(&self.0, f)
    }
}

impl LowerHex for MemoryAddress {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        LowerHex::fmt(&self.0, f)
    }
}

impl core::ops::Add<MemoryAddress> for MemoryAddress {
    type Output = Self;

    fn add(self, rhs: MemoryAddress) -> Self::Output {
        MemoryAddress(self.0 + rhs.0)
    }
}

impl core::ops::Add<u32> for MemoryAddress {
    type Output = Self;

    fn add(self, rhs: u32) -> Self::Output {
        MemoryAddress(self.0 + rhs)
    }
}

type SysTrace = [Vec<Felt>; SYS_TRACE_WIDTH];

pub struct DecoderTrace {
    trace: [Vec<Felt>; DECODER_TRACE_WIDTH],
    aux_builder: decoder::AuxTraceBuilder,
}

pub struct StackTrace {
    trace: [Vec<Felt>; STACK_TRACE_WIDTH],
}

pub struct RangeCheckTrace {
    trace: [Vec<Felt>; RANGE_CHECK_TRACE_WIDTH],
    aux_builder: range::AuxTraceBuilder,
}

pub struct ChipletsTrace {
    trace: [Vec<Felt>; CHIPLETS_WIDTH],
    aux_builder: chiplets::AuxTraceBuilder,
}
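
/// Executes the specified `program` against the provided stack and advice inputs and returns the
/// resulting execution trace, or an error if execution fails.
///
/// The `host` supplies external MAST forests and event/debug handlers during execution, while
/// `options` controls execution limits such as the maximum allowed number of cycles.
///
/// A minimal usage sketch (not compiled as a doc-test; it assumes a `Program` assembled elsewhere
/// and that the default implementations of the input types are sufficient):
///
/// ```ignore
/// let mut host = DefaultHost::default();
/// let trace = execute(
///     &program,
///     StackInputs::default(),
///     AdviceInputs::default(),
///     &mut host,
///     ExecutionOptions::default(),
/// )?;
/// assert_eq!(trace.program_hash(), &program.hash());
/// ```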
#[tracing::instrument("execute_program", skip_all)]
pub fn execute(
    program: &Program,
    stack_inputs: StackInputs,
    advice_inputs: AdviceInputs,
    host: &mut impl SyncHost,
    options: ExecutionOptions,
) -> Result<ExecutionTrace, ExecutionError> {
    let mut process = Process::new(program.kernel().clone(), stack_inputs, advice_inputs, options);
    let stack_outputs = process.execute(program, host)?;
    let trace = ExecutionTrace::new(process, stack_outputs);
    assert_eq!(&program.hash(), trace.program_hash(), "inconsistent program hash");
    Ok(trace)
}
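
/// Executes the specified `program` in debug mode (via [Process::new_debug]) and returns a
/// [VmStateIterator] that can be used to step through the VM state at each cycle.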
pub fn execute_iter(
    program: &Program,
    stack_inputs: StackInputs,
    advice_inputs: AdviceInputs,
    host: &mut impl SyncHost,
) -> VmStateIterator {
    let mut process = Process::new_debug(program.kernel().clone(), stack_inputs, advice_inputs);
    let result = process.execute(program, host);
    if result.is_ok() {
        assert_eq!(
            program.hash(),
            process.decoder.program_hash().into(),
            "inconsistent program hash"
        );
    }
    VmStateIterator::new(process, result)
}
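
/// A processor which can execute a Miden VM program and build the columns of the corresponding
/// execution trace as it runs.
///
/// The internal components (advice provider, system, decoder, stack, range checker, and chiplets)
/// are private in normal builds; with the `testing` feature (or in tests) they are exposed as
/// public fields so tests can inspect them directly.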
#[cfg(not(any(test, feature = "testing")))]
pub struct Process {
    advice: AdviceProvider,
    system: System,
    decoder: Decoder,
    stack: Stack,
    range: RangeChecker,
    chiplets: Chiplets,
    max_cycles: u32,
    enable_tracing: bool,
}

#[cfg(any(test, feature = "testing"))]
pub struct Process {
    pub advice: AdviceProvider,
    pub system: System,
    pub decoder: Decoder,
    pub stack: Stack,
    pub range: RangeChecker,
    pub chiplets: Chiplets,
    pub max_cycles: u32,
    pub enable_tracing: bool,
}

impl Process {
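    /// Creates a new process instantiated with the specified kernel, stack inputs, advice
    /// inputs, and execution options.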
    pub fn new(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        Self::initialize(kernel, stack_inputs, advice_inputs, execution_options)
    }
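
    /// Creates a new process instantiated with the specified kernel and inputs, with debugging
    /// and tracing enabled.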
    pub fn new_debug(
        kernel: Kernel,
        stack_inputs: StackInputs,
        advice_inputs: AdviceInputs,
    ) -> Self {
        Self::initialize(
            kernel,
            stack_inputs,
            advice_inputs,
            ExecutionOptions::default().with_tracing().with_debugging(true),
        )
    }

    fn initialize(
        kernel: Kernel,
        stack: StackInputs,
        advice_inputs: AdviceInputs,
        execution_options: ExecutionOptions,
    ) -> Self {
        let in_debug_mode = execution_options.enable_debugging();
        Self {
            advice: advice_inputs.into(),
            system: System::new(execution_options.expected_cycles() as usize),
            decoder: Decoder::new(in_debug_mode),
            stack: Stack::new(&stack, execution_options.expected_cycles() as usize, in_debug_mode),
            range: RangeChecker::new(),
            chiplets: Chiplets::new(kernel),
            max_cycles: execution_options.max_cycles(),
            enable_tracing: execution_options.enable_tracing(),
        }
    }
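
    /// Executes the provided [Program] on this process and returns the resulting stack outputs.
    ///
    /// Returns an error if the process has already executed a program (i.e., its clock is not
    /// zero) or if execution of the program fails.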
    pub fn execute(
        &mut self,
        program: &Program,
        host: &mut impl SyncHost,
    ) -> Result<StackOutputs, ExecutionError> {
        if self.system.clk() != 0 {
            return Err(ExecutionError::ProgramAlreadyExecuted);
        }

        self.advice
            .extend_map(program.mast_forest().advice_map())
            .map_err(|err| ExecutionError::advice_error(err, RowIndex::from(0), &()))?;

        self.execute_mast_node(program.entrypoint(), &program.mast_forest().clone(), host)?;

        self.stack.build_stack_outputs()
    }
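
    /// Executes the MAST node with the specified ID, dispatching on the node's type. Decorators
    /// attached to the node are executed before entering and after exiting the node.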
    fn execute_mast_node(
        &mut self,
        node_id: MastNodeId,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let node = program
            .get_node_by_id(node_id)
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id })?;

        for &decorator_id in node.before_enter() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        match node {
            MastNode::Block(node) => self.execute_basic_block_node(node, program, host)?,
            MastNode::Join(node) => self.execute_join_node(node, program, host)?,
            MastNode::Split(node) => self.execute_split_node(node, program, host)?,
            MastNode::Loop(node) => self.execute_loop_node(node, program, host)?,
            MastNode::Call(node) => {
                let err_ctx = err_ctx!(program, node, host);
                add_error_ctx_to_external_error(
                    self.execute_call_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::Dyn(node) => {
                let err_ctx = err_ctx!(program, node, host);
                add_error_ctx_to_external_error(
                    self.execute_dyn_node(node, program, host),
                    err_ctx,
                )?
            },
            MastNode::External(external_node) => {
                let (root_id, mast_forest) = self.resolve_external_node(external_node, host)?;

                self.execute_mast_node(root_id, &mast_forest, host)?;
            },
        }

        for &decorator_id in node.after_exit() {
            self.execute_decorator(&program[decorator_id], host)?;
        }

        Ok(())
    }

    #[inline(always)]
    fn execute_join_node(
        &mut self,
        node: &JoinNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_join_node(node, program, host)?;

        self.execute_mast_node(node.first(), program, host)?;
        self.execute_mast_node(node.second(), program, host)?;

        self.end_join_node(node, program, host)
    }

    #[inline(always)]
    fn execute_split_node(
        &mut self,
        node: &SplitNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let condition = self.start_split_node(node, program, host)?;

        if condition == ONE {
            self.execute_mast_node(node.on_true(), program, host)?;
        } else if condition == ZERO {
            self.execute_mast_node(node.on_false(), program, host)?;
        } else {
            let err_ctx = err_ctx!(program, node, host);
            return Err(ExecutionError::not_binary_value_if(condition, &err_ctx));
        }

        self.end_split_node(node, program, host)
    }
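
    /// Executes the specified [LoopNode], re-executing the loop body for as long as the value on
    /// top of the stack is ONE. Returns an error if the loop condition is not binary.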
    #[inline(always)]
    fn execute_loop_node(
        &mut self,
        node: &LoopNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let condition = self.start_loop_node(node, program, host)?;

        if condition == ONE {
            self.execute_mast_node(node.body(), program, host)?;

            while self.stack.peek() == ONE {
                self.decoder.repeat();
                self.execute_op(Operation::Drop, program, host)?;
                self.execute_mast_node(node.body(), program, host)?;
            }

            if self.stack.peek() != ZERO {
                let err_ctx = err_ctx!(program, node, host);
                return Err(ExecutionError::not_binary_value_loop(self.stack.peek(), &err_ctx));
            }

            self.end_loop_node(node, true, program, host)
        } else if condition == ZERO {
            self.end_loop_node(node, false, program, host)
        } else {
            let err_ctx = err_ctx!(program, node, host);
            Err(ExecutionError::not_binary_value_loop(condition, &err_ctx))
        }
    }
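
    /// Executes the specified [CallNode]. For syscalls, the callee is first looked up in the
    /// kernel ROM. Returns an error if the call is attempted from within a syscall context.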
    #[inline(always)]
    fn execute_call_node(
        &mut self,
        call_node: &CallNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        if self.system.in_syscall() {
            let instruction = if call_node.is_syscall() { "syscall" } else { "call" };
            return Err(ExecutionError::CallInSyscall(instruction));
        }

        if call_node.is_syscall() {
            let callee = program.get_node_by_id(call_node.callee()).ok_or_else(|| {
                ExecutionError::MastNodeNotFoundInForest { node_id: call_node.callee() }
            })?;
            let err_ctx = err_ctx!(program, call_node, host);
            self.chiplets.kernel_rom.access_proc(callee.digest(), &err_ctx)?;
        }
        let err_ctx = err_ctx!(program, call_node, host);

        self.start_call_node(call_node, program, host)?;
        self.execute_mast_node(call_node.callee(), program, host)?;
        self.end_call_node(call_node, program, host, &err_ctx)
    }
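
    /// Executes the specified [DynNode]. The MAST root of the callee is resolved at runtime and
    /// looked up first in the current MAST forest and then, if not found there, in the MAST
    /// forests provided by the host.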
    #[inline(always)]
    fn execute_dyn_node(
        &mut self,
        node: &DynNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        if node.is_dyncall() && self.system.in_syscall() {
            return Err(ExecutionError::CallInSyscall("dyncall"));
        }

        let err_ctx = err_ctx!(program, node, host);

        let callee_hash = if node.is_dyncall() {
            self.start_dyncall_node(node, &err_ctx)?
        } else {
            self.start_dyn_node(node, program, host, &err_ctx)?
        };

        match program.find_procedure_root(callee_hash) {
            Some(callee_id) => self.execute_mast_node(callee_id, program, host)?,
            None => {
                let mast_forest = host
                    .get_mast_forest(&callee_hash)
                    .ok_or_else(|| ExecutionError::dynamic_node_not_found(callee_hash, &err_ctx))?;

                let root_id = mast_forest
                    .find_procedure_root(callee_hash)
                    .ok_or(ExecutionError::malfored_mast_forest_in_host(callee_hash, &()))?;

                self.advice
                    .extend_map(mast_forest.advice_map())
                    .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

                self.execute_mast_node(root_id, &mast_forest, host)?
            },
        }

        if node.is_dyncall() {
            self.end_dyncall_node(node, program, host, &err_ctx)
        } else {
            self.end_dyn_node(node, program, host)
        }
    }
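
    /// Executes the specified [BasicBlockNode] by executing its operation batches one after the
    /// other, issuing a RESPAN before every batch after the first. Any decorators scheduled after
    /// the last operation are executed once the block has ended.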
    #[inline(always)]
    fn execute_basic_block_node(
        &mut self,
        basic_block: &BasicBlockNode,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        self.start_basic_block_node(basic_block, program, host)?;

        let mut op_offset = 0;
        let mut decorator_ids = basic_block.decorator_iter();

        self.execute_op_batch(
            basic_block,
            &basic_block.op_batches()[0],
            &mut decorator_ids,
            op_offset,
            program,
            host,
        )?;
        op_offset += basic_block.op_batches()[0].ops().len();

        for op_batch in basic_block.op_batches().iter().skip(1) {
            self.respan(op_batch);
            self.execute_op(Operation::Noop, program, host)?;
            self.execute_op_batch(
                basic_block,
                op_batch,
                &mut decorator_ids,
                op_offset,
                program,
                host,
            )?;
            op_offset += op_batch.ops().len();
        }

        self.end_basic_block_node(basic_block, program, host)?;

        for &decorator_id in decorator_ids {
            let decorator = program
                .get_decorator_by_id(decorator_id)
                .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
            self.execute_decorator(decorator, host)?;
        }

        Ok(())
    }
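
    /// Executes all operations in the specified [OpBatch]. Decorators scheduled at an operation's
    /// index are executed before that operation. Operation groups are padded with NOOPs as needed
    /// so that the number of executed groups equals the batch's group count rounded up to the
    /// next power of two.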
    #[inline(always)]
    fn execute_op_batch(
        &mut self,
        basic_block: &BasicBlockNode,
        batch: &OpBatch,
        decorators: &mut DecoratorIterator,
        op_offset: usize,
        program: &MastForest,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        let op_counts = batch.op_counts();
        let mut op_idx = 0;
        let mut group_idx = 0;
        let mut next_group_idx = 1;

        let num_batch_groups = batch.num_groups().next_power_of_two();

        for (i, &op) in batch.ops().iter().enumerate() {
            while let Some(&decorator_id) = decorators.next_filtered(i + op_offset) {
                let decorator = program
                    .get_decorator_by_id(decorator_id)
                    .ok_or(ExecutionError::DecoratorNotFoundInForest { decorator_id })?;
                self.execute_decorator(decorator, host)?;
            }

            let err_ctx = err_ctx!(program, basic_block, host, i + op_offset);
            self.decoder.execute_user_op(op, op_idx);
            self.execute_op_with_error_ctx(op, program, host, &err_ctx)?;

            let has_imm = op.imm_value().is_some();
            if has_imm {
                next_group_idx += 1;
            }

            if op_idx == op_counts[group_idx] - 1 {
                if has_imm {
                    debug_assert!(op_idx < OP_GROUP_SIZE - 1, "invalid op index");
                    self.decoder.execute_user_op(Operation::Noop, op_idx + 1);
                    self.execute_op(Operation::Noop, program, host)?;
                }

                group_idx = next_group_idx;
                next_group_idx += 1;
                op_idx = 0;

                if group_idx < num_batch_groups {
                    self.decoder.start_op_group(batch.groups()[group_idx]);
                }
            } else {
                op_idx += 1;
            }
        }

        for group_idx in group_idx..num_batch_groups {
            self.decoder.execute_user_op(Operation::Noop, 0);
            self.execute_op(Operation::Noop, program, host)?;

            if group_idx < num_batch_groups - 1 {
                self.decoder.start_op_group(ZERO);
            }
        }

        Ok(())
    }
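
    /// Executes the specified decorator: debug and assembly-op decorators take effect only in
    /// debug mode, and trace decorators only when tracing is enabled.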
    fn execute_decorator(
        &mut self,
        decorator: &Decorator,
        host: &mut impl SyncHost,
    ) -> Result<(), ExecutionError> {
        match decorator {
            Decorator::Debug(options) => {
                if self.decoder.in_debug_mode() {
                    let process = &mut self.state();
                    host.on_debug(process, options)?;
                }
            },
            Decorator::AsmOp(assembly_op) => {
                if self.decoder.in_debug_mode() {
                    self.decoder.append_asmop(self.system.clk(), assembly_op.clone());
                }
            },
            Decorator::Trace(id) => {
                if self.enable_tracing {
                    let process = &mut self.state();
                    host.on_trace(process, *id)?;
                }
            },
        };
        Ok(())
    }
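
    /// Resolves the specified [ExternalNode] to a procedure root in a MAST forest provided by the
    /// host and merges that forest's advice map into the advice provider. Returns an error if no
    /// forest with the requested procedure is available or if the resolved node is itself an
    /// external node (which would form a cycle).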
    fn resolve_external_node(
        &mut self,
        external_node: &ExternalNode,
        host: &impl SyncHost,
    ) -> Result<(MastNodeId, Arc<MastForest>), ExecutionError> {
        let node_digest = external_node.digest();

        let mast_forest = host
            .get_mast_forest(&node_digest)
            .ok_or(ExecutionError::no_mast_forest_with_procedure(node_digest, &()))?;

        let root_id = mast_forest
            .find_procedure_root(node_digest)
            .ok_or(ExecutionError::malfored_mast_forest_in_host(node_digest, &()))?;

        if mast_forest[root_id].is_external() {
            return Err(ExecutionError::CircularExternalNode(node_digest));
        }

        self.advice
            .extend_map(mast_forest.advice_map())
            .map_err(|err| ExecutionError::advice_error(err, self.system.clk(), &()))?;

        Ok((root_id, mast_forest))
    }

    pub const fn kernel(&self) -> &Kernel {
        self.chiplets.kernel_rom.kernel()
    }

    pub fn into_parts(self) -> (System, Decoder, Stack, RangeChecker, Chiplets) {
        (self.system, self.decoder, self.stack, self.range, self.chiplets)
    }
}

#[derive(Debug)]
pub struct SlowProcessState<'a> {
    advice: &'a mut AdviceProvider,
    system: &'a System,
    stack: &'a Stack,
    chiplets: &'a Chiplets,
}
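
/// A view into the state of a running process (either the slow [Process] or the fast processor),
/// giving hosts access to the clock, execution context, stack, memory, and advice provider.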
#[derive(Debug)]
pub enum ProcessState<'a> {
    Slow(SlowProcessState<'a>),
    Fast(FastProcessState<'a>),
}

impl Process {
    #[inline(always)]
    pub fn state(&mut self) -> ProcessState<'_> {
        ProcessState::Slow(SlowProcessState {
            advice: &mut self.advice,
            system: &self.system,
            stack: &self.stack,
            chiplets: &self.chiplets,
        })
    }
}

impl<'a> ProcessState<'a> {
    /// Returns a reference to the advice provider.
    #[inline(always)]
    pub fn advice_provider(&self) -> &AdviceProvider {
        match self {
            ProcessState::Slow(state) => state.advice,
            ProcessState::Fast(state) => &state.processor.advice,
        }
    }

    /// Returns a mutable reference to the advice provider.
    #[inline(always)]
    pub fn advice_provider_mut(&mut self) -> &mut AdviceProvider {
        match self {
            ProcessState::Slow(state) => state.advice,
            ProcessState::Fast(state) => &mut state.processor.advice,
        }
    }

    /// Returns the current clock cycle of the process.
    #[inline(always)]
    pub fn clk(&self) -> RowIndex {
        match self {
            ProcessState::Slow(state) => state.system.clk(),
            ProcessState::Fast(state) => state.processor.clk,
        }
    }

    /// Returns the current execution context ID.
    #[inline(always)]
    pub fn ctx(&self) -> ContextId {
        match self {
            ProcessState::Slow(state) => state.system.ctx(),
            ProcessState::Fast(state) => state.processor.ctx,
        }
    }

    /// Returns the current value of the free memory pointer.
    #[inline(always)]
    pub fn fmp(&self) -> u64 {
        match self {
            ProcessState::Slow(state) => state.system.fmp().as_int(),
            ProcessState::Fast(state) => state.processor.fmp.as_int(),
        }
    }

    /// Returns the stack element at the specified position, with position 0 being the top of the
    /// stack.
    #[inline(always)]
    pub fn get_stack_item(&self, pos: usize) -> Felt {
        match self {
            ProcessState::Slow(state) => state.stack.get(pos),
            ProcessState::Fast(state) => state.processor.stack_get(pos),
        }
    }

    /// Returns the word at the specified word index on the stack, with index 0 corresponding to
    /// the top four stack elements.
    #[inline(always)]
    pub fn get_stack_word(&self, word_idx: usize) -> Word {
        match self {
            ProcessState::Slow(state) => state.stack.get_word(word_idx),
            ProcessState::Fast(state) => state.processor.stack_get_word(word_idx * WORD_SIZE),
        }
    }

    /// Returns the full state of the stack at the current clock cycle.
    #[inline(always)]
    pub fn get_stack_state(&self) -> Vec<Felt> {
        match self {
            ProcessState::Slow(state) => state.stack.get_state_at(state.system.clk()),
            ProcessState::Fast(state) => state.processor.stack().iter().rev().copied().collect(),
        }
    }

    /// Returns the element stored at the specified context and memory address, or `None` if no
    /// value has been written there.
    #[inline(always)]
    pub fn get_mem_value(&self, ctx: ContextId, addr: u32) -> Option<Felt> {
        match self {
            ProcessState::Slow(state) => state.chiplets.memory.get_value(ctx, addr),
            ProcessState::Fast(state) => state.processor.memory.read_element_impl(ctx, addr),
        }
    }

    /// Returns the word stored at the specified context and memory address, or an error if the
    /// word cannot be read (e.g., if the address is not word-aligned).
    #[inline(always)]
    pub fn get_mem_word(&self, ctx: ContextId, addr: u32) -> Result<Option<Word>, MemoryError> {
        match self {
            ProcessState::Slow(state) => state.chiplets.memory.get_word(ctx, addr),
            ProcessState::Fast(state) => {
                state.processor.memory.read_word_impl(ctx, addr, None, &())
            },
        }
    }

    /// Returns the state of memory in the specified context as a list of (address, value) pairs.
    #[inline(always)]
    pub fn get_mem_state(&self, ctx: ContextId) -> Vec<(MemoryAddress, Felt)> {
        match self {
            ProcessState::Slow(state) => {
                state.chiplets.memory.get_state_at(ctx, state.system.clk())
            },
            ProcessState::Fast(state) => state.processor.memory.get_memory_state(ctx),
        }
    }
}

impl<'a> From<&'a mut Process> for ProcessState<'a> {
    fn from(process: &'a mut Process) -> Self {
        process.state()
    }
}
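
/// Attaches the provided error context to "no MAST forest with procedure" and "malformed MAST
/// forest in host" errors that were raised without a known source span, so that call-site
/// information is preserved when such errors originate from a node resolved through the host.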
pub(crate) fn add_error_ctx_to_external_error(
    result: Result<(), ExecutionError>,
    err_ctx: impl ErrorContext,
) -> Result<(), ExecutionError> {
    match result {
        Ok(_) => Ok(()),
        Err(err) => match err {
            ExecutionError::NoMastForestWithProcedure { label, source_file: _, root_digest }
            | ExecutionError::MalformedMastForestInHost { label, source_file: _, root_digest } => {
                if label == SourceSpan::UNKNOWN {
                    let err_with_ctx =
                        ExecutionError::no_mast_forest_with_procedure(root_digest, &err_ctx);
                    Err(err_with_ctx)
                } else {
                    Err(err)
                }
            },

            _ => {
                Err(err)
            },
        },
    }
}