use miden_core::Word;
use miden_processor::{
ContextId, FastProcessor, Felt, ProcessorState, StackInputs, StackOutputs, trace::RowIndex,
};
use smallvec::SmallVec;
use super::TraceEvent;
use crate::{debug::NativePtr, felt::FromMidenRepr};
/// Callback invoked for each [`TraceEvent`] emitted during execution, given
/// read access to the current processor state.
pub type TraceHandler = dyn FnMut(&ProcessorState<'_>, TraceEvent);
/// Errors that can occur when reading a range of bytes out of VM linear memory.
#[derive(Debug, thiserror::Error)]
pub enum MemoryReadError {
    /// The requested byte range extends past the end of addressable memory.
    #[error("attempted to read beyond end of linear memory")]
    OutOfBounds,
    /// The read did not start on a supported (element-aligned) boundary.
    #[error("unaligned reads are not supported yet")]
    UnalignedRead,
}
/// The outcome of executing a program: the final processor state plus the
/// stack outputs, from which result values and memory contents can be read.
pub struct ExecutionTrace {
    // Execution context of the program's entry point; used as the default
    // context for memory reads.
    pub(super) root_context: ContextId,
    // Clock cycle at which execution stopped; used as the default read cycle.
    pub(super) last_cycle: RowIndex,
    // Processor retained after execution so its memory can be inspected.
    pub(super) processor: FastProcessor,
    // Final contents of the operand stack.
    pub(super) outputs: StackOutputs,
}
impl ExecutionTrace {
pub fn empty() -> Self {
Self {
root_context: ContextId::root(),
last_cycle: RowIndex::from(0u32),
processor: FastProcessor::new(StackInputs::default()),
outputs: StackOutputs::default(),
}
}
pub fn parse_result<T>(&self) -> Option<T>
where
T: FromMidenRepr,
{
let size = <T as FromMidenRepr>::size_in_felts();
let stack = self.outputs.get_num_elements(size);
if stack.len() < size {
return None;
}
let mut stack = stack.to_vec();
stack.reverse();
Some(<T as FromMidenRepr>::pop_from_stack(&mut stack))
}
#[inline]
pub fn into_outputs(self) -> StackOutputs {
self.outputs
}
#[inline]
pub fn outputs(&self) -> &StackOutputs {
&self.outputs
}
pub fn read_memory_word(&self, addr: u32) -> Option<Word> {
self.read_memory_word_in_context(addr, self.root_context, self.last_cycle)
}
pub fn read_memory_word_in_context(
&self,
addr: u32,
ctx: ContextId,
clk: RowIndex,
) -> Option<Word> {
const ZERO: Word = Word::new([Felt::ZERO; 4]);
match self.processor.memory().read_word(ctx, Felt::new(addr as u64), clk) {
Ok(word) => Some(word),
Err(_) => Some(ZERO),
}
}
#[track_caller]
pub fn read_memory_element(&self, addr: u32) -> Option<Felt> {
self.processor
.memory()
.read_element(self.root_context, Felt::new(addr as u64))
.ok()
}
#[track_caller]
pub fn read_memory_element_in_context(
&self,
addr: u32,
ctx: ContextId,
_clk: RowIndex,
) -> Option<Felt> {
self.processor.memory().read_element(ctx, Felt::new(addr as u64)).ok()
}
pub fn read_bytes_for_type(
&self,
addr: NativePtr,
ty: &miden_assembly_syntax::ast::types::Type,
ctx: ContextId,
clk: RowIndex,
) -> Result<Vec<u8>, MemoryReadError> {
let size = ty.size_in_bytes();
if addr.is_element_aligned() {
read_memory_bytes(addr, size, |addr| {
Ok(self.read_memory_element_in_context(addr, ctx, clk).unwrap_or_default())
})
} else {
Err(MemoryReadError::UnalignedRead)
}
}
#[track_caller]
pub fn read_from_rust_memory<T>(&self, addr: u32) -> Option<T>
where
T: core::any::Any + FromMidenRepr,
{
self.read_from_rust_memory_in_context(addr, self.root_context, self.last_cycle)
}
#[track_caller]
pub fn read_from_rust_memory_in_context<T>(
&self,
addr: u32,
ctx: ContextId,
clk: RowIndex,
) -> Option<T>
where
T: core::any::Any + FromMidenRepr,
{
let ptr = NativePtr::from_ptr(addr);
assert_eq!(ptr.offset, 0, "support for unaligned reads is not yet implemented");
let size = <T as FromMidenRepr>::size_in_felts();
let mut felts = SmallVec::<[_; 4]>::with_capacity(size);
for index in 0..(size as u32) {
felts.push(self.read_memory_element_in_context(ptr.addr + index, ctx, clk)?);
}
Some(T::from_felts(&felts))
}
}
/// Convert a field element to the little-endian bytes of its low 32 bits.
pub(crate) fn felt_to_le_bytes(elem: Felt) -> [u8; 4] {
    // Memory elements are expected to hold u32 values, so only the low 32
    // bits of the canonical representation are meaningful.
    let low_bits = (elem.as_canonical_u64() & u64::from(u32::MAX)) as u32;
    low_bits.to_le_bytes()
}
/// Read `size` bytes starting at `ptr` (element address plus byte offset),
/// fetching whole elements via `read_elem` and slicing out the requested
/// byte range.
///
/// # Errors
///
/// Returns [`MemoryReadError::OutOfBounds`] (via `E`) when the range would
/// overflow the address space, and propagates any error from `read_elem`.
pub(crate) fn read_memory_bytes<E>(
    ptr: NativePtr,
    size: usize,
    mut read_elem: impl FnMut(u32) -> Result<Felt, E>,
) -> Result<Vec<u8>, E>
where
    E: From<MemoryReadError>,
{
    if size == 0 {
        return Ok(Vec::new());
    }
    let byte_start = usize::from(ptr.offset);
    let byte_end = byte_start
        .checked_add(size)
        .ok_or_else(|| E::from(MemoryReadError::OutOfBounds))?;
    // Each element contributes 4 bytes; round up to cover the final partial
    // element.
    let felt_count = byte_end.div_ceil(4);
    let mut buffer = Vec::with_capacity(felt_count.saturating_mul(4));
    for elem_index in 0..felt_count {
        let elem_index =
            u32::try_from(elem_index).map_err(|_| E::from(MemoryReadError::OutOfBounds))?;
        let elem_addr = ptr
            .addr
            .checked_add(elem_index)
            .ok_or_else(|| E::from(MemoryReadError::OutOfBounds))?;
        let elem = read_elem(elem_addr)?;
        buffer.extend_from_slice(&felt_to_le_bytes(elem));
    }
    // Discard the bytes before the pointer's byte offset and after the
    // requested length.
    Ok(buffer[byte_start..byte_end].to_vec())
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use miden_assembly::DefaultSourceManager;
    use miden_assembly_syntax::ast::types::Type;
    use miden_processor::{ContextId, trace::RowIndex};
    use super::ExecutionTrace;
    use crate::{Executor, debug::NativePtr, felt::ToMidenRepr};

    // A trace with zero cycles and default processor/stack state, used as a
    // base for struct-update syntax in tests.
    fn empty_trace() -> ExecutionTrace {
        ExecutionTrace {
            root_context: ContextId::root(),
            last_cycle: RowIndex::from(0_u32),
            processor: miden_processor::FastProcessor::new(miden_processor::StackInputs::default()),
            outputs: miden_processor::StackOutputs::default(),
        }
    }

    // Assemble the given MASM source and execute it, capturing the trace.
    fn execute_trace(source: &str) -> ExecutionTrace {
        let source_manager = Arc::new(DefaultSourceManager::default());
        let program = miden_assembly::Assembler::new(source_manager.clone())
            .assemble_program(source)
            .unwrap();
        Executor::new(vec![]).capture_trace(&program, source_manager)
    }

    #[test]
    fn parse_result_reads_multi_felt_outputs_in_stack_order() {
        // A u64 spans multiple felts; `parse_result` must reassemble them in
        // the order `pop_from_stack` expects.
        let outputs = 0x0807_0605_0403_0201_u64.to_felts();
        let trace = ExecutionTrace {
            outputs: miden_processor::StackOutputs::new(&outputs).unwrap(),
            ..empty_trace()
        };
        let result = trace.parse_result::<u64>().unwrap();
        assert_eq!(result, 0x0807_0605_0403_0201_u64);
    }

    #[test]
    fn read_bytes_for_type_preserves_little_endian_bytes() {
        // Stores 0x1234 at address 8, and the two 32-bit halves of a u64
        // (0x04030201 and 0x08070605) at addresses 12 and 13.
        // NOTE(review): addresses 12/13 are consecutive element addresses here
        // — presumes element-addressed (not byte-addressed) memory; confirm
        // against the processor's memory model.
        let trace = execute_trace(
            r#"
            begin
                push.4660
                push.8
                mem_store
                push.67305985
                push.12
                mem_store
                push.134678021
                push.13
                mem_store
            end
        "#,
        );
        let ctx = ContextId::root();
        let u16_bytes = trace
            .read_bytes_for_type(NativePtr::new(8, 0), &Type::U16, ctx, RowIndex::from(0_u32))
            .unwrap();
        let u64_bytes = trace
            .read_bytes_for_type(NativePtr::new(12, 0), &Type::U64, ctx, RowIndex::from(0_u32))
            .unwrap();
        // Bytes come back little-endian: 0x1234 -> [0x34, 0x12].
        assert_eq!(u16_bytes, vec![0x34, 0x12]);
        assert_eq!(u64_bytes, vec![1, 2, 3, 4, 5, 6, 7, 8]);
    }
}