1use std::{
2 cell::RefCell,
3 collections::{BTreeMap, BTreeSet, VecDeque},
4 rc::Rc,
5};
6
7use miden_assembly::Library as CompiledLibrary;
8use miden_core::{FieldElement, Program, StackInputs, Word};
9use miden_processor::{
10 AdviceInputs, ContextId, ExecutionError, Felt, MastForest, MemAdviceProvider, Process,
11 ProcessState, RowIndex, StackOutputs, TraceLenSummary, VmState, VmStateIterator,
12};
13use midenc_codegen_masm::NativePtr;
14pub use midenc_codegen_masm::TraceEvent;
15use midenc_hir::{SmallVec, ToSmallVec, Type};
16use midenc_session::Session;
17
18use super::MemoryChiplet;
19use crate::{debug::CallStack, DebuggerHost, FromMidenRepr, TestFelt};
20
/// Callback type invoked with the clock cycle at which a [TraceEvent] was emitted.
///
/// NOTE(review): exact invocation point is not visible in this chunk — presumably
/// registered with the executor and called once per emitted event; confirm at call site.
pub type TraceHandler = dyn FnMut(RowIndex, TraceEvent);
23
/// Errors that may occur when reading from the linear memory of an [ExecutionTrace].
#[derive(Debug, thiserror::Error)]
pub enum MemoryReadError {
    /// The requested range extends past the end of addressable linear memory
    #[error("attempted to read beyond end of linear memory")]
    OutOfBounds,
    /// The requested address was not properly aligned for the read
    #[error("unaligned reads are not supported yet")]
    UnalignedRead,
}
32
/// The state captured from a completed program execution: the final operand stack,
/// the contents of the memory chiplet, and summary statistics about the trace.
pub struct ExecutionTrace {
    /// The context in which execution started; used as the default context for memory reads
    pub(super) root_context: ContextId,
    /// The clock cycle at which execution ended; used as the default cycle for memory reads
    pub(super) last_cycle: RowIndex,
    /// The state of the memory chiplet captured from execution
    pub(super) memory: MemoryChiplet,
    /// The operand stack as it was left at the end of execution
    pub(super) outputs: StackOutputs,
    /// Summary of the lengths of the individual trace segments
    /// NOTE(review): semantics inferred from the type name — confirm against miden_processor docs
    pub(super) trace_len_summary: TraceLenSummary,
}
45
46impl ExecutionTrace {
47 pub fn parse_result<T>(&self) -> Option<T>
49 where
50 T: FromMidenRepr,
51 {
52 let size = <T as FromMidenRepr>::size_in_felts();
53 let stack = self.outputs.stack_truncated(size);
54 if stack.len() < size {
55 return None;
56 }
57 dbg!(stack);
58 let mut stack = stack.to_vec();
59 stack.reverse();
60 dbg!(&stack);
61 Some(<T as FromMidenRepr>::pop_from_stack(&mut stack))
62 }
63
64 #[inline]
66 pub fn into_outputs(self) -> StackOutputs {
67 self.outputs
68 }
69
70 #[inline]
72 pub fn outputs(&self) -> &StackOutputs {
73 &self.outputs
74 }
75
76 #[inline]
78 pub fn trace_len_summary(&self) -> &TraceLenSummary {
79 &self.trace_len_summary
80 }
81
82 pub fn read_memory_word(&self, addr: u32) -> Option<Word> {
84 self.read_memory_word_in_context(addr, self.root_context, self.last_cycle)
85 }
86
87 pub fn read_memory_word_in_context(
89 &self,
90 addr: u32,
91 ctx: ContextId,
92 clk: RowIndex,
93 ) -> Option<Word> {
94 use miden_core::FieldElement;
95
96 const ZERO: Word = [Felt::ZERO; 4];
97
98 Some(
99 self.memory
100 .get_word(ctx, addr)
101 .unwrap_or_else(|err| panic!("{err}"))
102 .unwrap_or(ZERO),
103 )
104 }
105
106 #[track_caller]
108 pub fn read_memory_element(&self, addr: u32) -> Option<Felt> {
109 self.memory.get_value(self.root_context, addr)
110 }
111
112 #[track_caller]
115 pub fn read_memory_element_in_context(
116 &self,
117 addr: u32,
118 ctx: ContextId,
119 _clk: RowIndex,
120 ) -> Option<Felt> {
121 self.memory.get_value(ctx, addr)
122 }
123
124 pub fn read_bytes_for_type(
127 &self,
128 addr: NativePtr,
129 ty: &Type,
130 ctx: ContextId,
131 clk: RowIndex,
132 ) -> Result<Vec<u8>, MemoryReadError> {
133 const U32_MASK: u64 = u32::MAX as u64;
134 let size = ty.size_in_bytes();
135 let mut buf = Vec::with_capacity(size);
136
137 let size_in_felts = ty.size_in_felts();
138 let mut elems = Vec::with_capacity(size_in_felts);
139
140 if addr.is_element_aligned() {
141 for i in 0..size_in_felts {
142 let addr = addr.addr.checked_add(i as u32).ok_or(MemoryReadError::OutOfBounds)?;
143 elems.push(self.read_memory_element_in_context(addr, ctx, clk).unwrap_or_default());
144 }
145 } else {
146 return Err(MemoryReadError::UnalignedRead);
147 }
148
149 let mut needed = size - buf.len();
150 for elem in elems {
151 let bytes = ((elem.as_int() & U32_MASK) as u32).to_be_bytes();
152 let take = core::cmp::min(needed, 4);
153 buf.extend(&bytes[0..take]);
154 needed -= take;
155 }
156
157 Ok(buf)
158 }
159
160 #[track_caller]
162 pub fn read_from_rust_memory<T>(&self, addr: u32) -> Option<T>
163 where
164 T: core::any::Any + FromMidenRepr,
165 {
166 self.read_from_rust_memory_in_context(addr, self.root_context, self.last_cycle)
167 }
168
169 #[track_caller]
172 pub fn read_from_rust_memory_in_context<T>(
173 &self,
174 addr: u32,
175 ctx: ContextId,
176 clk: RowIndex,
177 ) -> Option<T>
178 where
179 T: core::any::Any + FromMidenRepr,
180 {
181 use core::any::TypeId;
182
183 let ptr = NativePtr::from_ptr(addr);
184 if TypeId::of::<T>() == TypeId::of::<Felt>() {
185 assert_eq!(ptr.offset, 0, "cannot read values of type Felt from unaligned addresses");
186 }
187 assert_eq!(ptr.offset, 0, "support for unaligned reads is not yet implemented");
188 match <T as FromMidenRepr>::size_in_felts() {
189 1 => {
190 let felt = self.read_memory_element_in_context(ptr.addr, ctx, clk)?;
191 Some(T::from_felts(&[felt]))
192 }
193 2 => {
194 let lo = self.read_memory_element_in_context(ptr.addr, ctx, clk)?;
195 let hi = self.read_memory_element_in_context(ptr.addr + 1, ctx, clk)?;
196 Some(T::from_felts(&[lo, hi]))
197 }
198 3 => {
199 let lo_l = self.read_memory_element_in_context(ptr.addr, ctx, clk)?;
200 let lo_h = self.read_memory_element_in_context(ptr.addr + 1, ctx, clk)?;
201 let hi_l = self.read_memory_element_in_context(ptr.addr + 2, ctx, clk)?;
202 Some(T::from_felts(&[lo_l, lo_h, hi_l]))
203 }
204 n => {
205 assert_ne!(n, 0);
206 let num_words = n.next_multiple_of(4) / 4;
207 let mut words = SmallVec::<[_; 2]>::with_capacity(num_words);
208 for word_index in 0..(num_words as u32) {
209 let addr = ptr.addr + (word_index * 4);
210 let mut word = self.read_memory_word(addr)?;
211 word.reverse();
212 dbg!(word_index, word);
213 words.push(word);
214 }
215 words.resize(num_words, [Felt::ZERO; 4]);
216 Some(T::from_words(&words))
217 }
218 }
219 }
220}