use miden_core::Word;
use miden_processor::{ContextId, FastProcessor, Felt, StackInputs, StackOutputs, trace::RowIndex};
use smallvec::SmallVec;
use super::TraceEvent;
use crate::{debug::NativePtr, felt::FromMidenRepr};
/// Callback invoked for each [`TraceEvent`] emitted during execution, together
/// with the clock cycle ([`RowIndex`]) at which the event occurred.
pub type TraceHandler = dyn FnMut(RowIndex, TraceEvent);
/// Errors that can occur when reading values out of the VM's linear memory.
#[derive(Debug, thiserror::Error)]
pub enum MemoryReadError {
    /// The requested address range extends past the end of linear memory.
    #[error("attempted to read beyond end of linear memory")]
    OutOfBounds,
    /// The read was not element-aligned; unaligned reads are unsupported.
    #[error("unaligned reads are not supported yet")]
    UnalignedRead,
}
/// Captures the result of executing a program on the Miden VM: the processor
/// state and the program's stack outputs, from which memory contents and
/// result values can be inspected after the fact.
pub struct ExecutionTrace {
    /// The root execution context used for memory reads by default.
    pub(super) root_context: ContextId,
    /// The final clock cycle, used as the default cycle for memory reads.
    pub(super) last_cycle: RowIndex,
    /// The processor whose memory is queried by the `read_*` helpers.
    pub(super) processor: FastProcessor,
    /// The operand stack contents at the end of execution.
    pub(super) outputs: StackOutputs,
}
impl ExecutionTrace {
pub fn empty() -> Self {
Self {
root_context: ContextId::root(),
last_cycle: RowIndex::from(0u32),
processor: FastProcessor::new(StackInputs::default()),
outputs: StackOutputs::default(),
}
}
pub fn parse_result<T>(&self) -> Option<T>
where
T: FromMidenRepr,
{
let size = <T as FromMidenRepr>::size_in_felts();
let stack = self.outputs.get_num_elements(size);
if stack.len() < size {
return None;
}
let mut stack = stack.to_vec();
stack.reverse();
Some(<T as FromMidenRepr>::pop_from_stack(&mut stack))
}
#[inline]
pub fn into_outputs(self) -> StackOutputs {
self.outputs
}
#[inline]
pub fn outputs(&self) -> &StackOutputs {
&self.outputs
}
pub fn read_memory_word(&self, addr: u32) -> Option<Word> {
self.read_memory_word_in_context(addr, self.root_context, self.last_cycle)
}
pub fn read_memory_word_in_context(
&self,
addr: u32,
ctx: ContextId,
clk: RowIndex,
) -> Option<Word> {
const ZERO: Word = Word::new([Felt::ZERO; 4]);
match self.processor.memory().read_word(ctx, Felt::new(addr as u64), clk) {
Ok(word) => Some(word),
Err(_) => Some(ZERO),
}
}
#[track_caller]
pub fn read_memory_element(&self, addr: u32) -> Option<Felt> {
self.processor
.memory()
.read_element(self.root_context, Felt::new(addr as u64))
.ok()
}
#[track_caller]
pub fn read_memory_element_in_context(
&self,
addr: u32,
ctx: ContextId,
_clk: RowIndex,
) -> Option<Felt> {
self.processor.memory().read_element(ctx, Felt::new(addr as u64)).ok()
}
pub fn read_bytes_for_type(
&self,
addr: NativePtr,
ty: &miden_assembly_syntax::ast::types::Type,
ctx: ContextId,
clk: RowIndex,
) -> Result<Vec<u8>, MemoryReadError> {
const U32_MASK: u64 = u32::MAX as u64;
let size = ty.size_in_bytes();
let mut buf = Vec::with_capacity(size);
let size_in_felts = ty.size_in_felts();
let mut elems = Vec::with_capacity(size_in_felts);
if addr.is_element_aligned() {
for i in 0..size_in_felts {
let addr = addr.addr.checked_add(i as u32).ok_or(MemoryReadError::OutOfBounds)?;
elems.push(self.read_memory_element_in_context(addr, ctx, clk).unwrap_or_default());
}
} else {
return Err(MemoryReadError::UnalignedRead);
}
let mut needed = size - buf.len();
for elem in elems {
let bytes = ((elem.as_canonical_u64() & U32_MASK) as u32).to_le_bytes();
let take = core::cmp::min(needed, 4);
buf.extend(&bytes[0..take]);
needed -= take;
}
Ok(buf)
}
#[track_caller]
pub fn read_from_rust_memory<T>(&self, addr: u32) -> Option<T>
where
T: core::any::Any + FromMidenRepr,
{
self.read_from_rust_memory_in_context(addr, self.root_context, self.last_cycle)
}
#[track_caller]
pub fn read_from_rust_memory_in_context<T>(
&self,
addr: u32,
ctx: ContextId,
clk: RowIndex,
) -> Option<T>
where
T: core::any::Any + FromMidenRepr,
{
let ptr = NativePtr::from_ptr(addr);
assert_eq!(ptr.offset, 0, "support for unaligned reads is not yet implemented");
let size = <T as FromMidenRepr>::size_in_felts();
let mut felts = SmallVec::<[_; 4]>::with_capacity(size);
for index in 0..(size as u32) {
felts.push(self.read_memory_element_in_context(ptr.addr + index, ctx, clk)?);
}
Some(T::from_felts(&felts))
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use miden_assembly::DefaultSourceManager;
use miden_assembly_syntax::ast::types::Type;
use miden_processor::{ContextId, trace::RowIndex};
use super::ExecutionTrace;
use crate::{Executor, debug::NativePtr, felt::ToMidenRepr};
// Builds a trace with a default processor and empty outputs; intended as a
// base for struct-update syntax (`..empty_trace()`) in the tests below.
fn empty_trace() -> ExecutionTrace {
ExecutionTrace {
root_context: ContextId::root(),
last_cycle: RowIndex::from(0_u32),
processor: miden_processor::FastProcessor::new(miden_processor::StackInputs::default()),
outputs: miden_processor::StackOutputs::default(),
}
}
// Assembles the given Miden Assembly source with no inputs and executes it,
// capturing the resulting trace for inspection.
fn execute_trace(source: &str) -> ExecutionTrace {
let source_manager = Arc::new(DefaultSourceManager::default());
let program = miden_assembly::Assembler::new(source_manager.clone())
.assemble_program(source)
.unwrap();
Executor::new(vec![]).capture_trace(&program, source_manager)
}
#[test]
fn parse_result_reads_multi_felt_outputs_in_stack_order() {
// A u64 spans multiple felts; `to_felts` produces its Miden representation
// and `parse_result` must reassemble the original value from the outputs.
let outputs = 0x0807_0605_0403_0201_u64.to_felts();
let trace = ExecutionTrace {
outputs: miden_processor::StackOutputs::new(&outputs).unwrap(),
..empty_trace()
};
let result = trace.parse_result::<u64>().unwrap();
assert_eq!(result, 0x0807_0605_0403_0201_u64);
}
#[test]
fn read_bytes_for_type_preserves_little_endian_bytes() {
// The program stores 4660 (0x1234) at address 8, 67305985 (0x04030201) at
// address 12, and 134678021 (0x08070605) at address 13.
let trace = execute_trace(
r#"
begin
push.4660
push.8
mem_store
push.67305985
push.12
mem_store
push.134678021
push.13
mem_store
end
"#,
);
let ctx = ContextId::root();
// The clock argument is ignored by element reads (see
// `read_memory_element_in_context`), so RowIndex 0 suffices here.
let u16_bytes = trace
.read_bytes_for_type(NativePtr::new(8, 0), &Type::U16, ctx, RowIndex::from(0_u32))
.unwrap();
let u64_bytes = trace
.read_bytes_for_type(NativePtr::new(12, 0), &Type::U64, ctx, RowIndex::from(0_u32))
.unwrap();
// u16 at addr 8: low two LE bytes of 0x1234.
assert_eq!(u16_bytes, vec![0x34, 0x12]);
// u64 at addr 12: LE bytes of 0x04030201 followed by LE bytes of 0x08070605.
assert_eq!(u64_bytes, vec![1, 2, 3, 4, 5, 6, 7, 8]);
}
}