use alloc::vec::Vec;
use miden_air::{
RowIndex,
trace::{
chiplets::hasher::DIGEST_LEN,
decoder::{
NUM_HASHER_COLUMNS, NUM_OP_BATCH_FLAGS, NUM_OP_BITS, NUM_OP_BITS_EXTRA_COLS,
OP_BATCH_1_GROUPS, OP_BATCH_2_GROUPS, OP_BATCH_4_GROUPS, OP_BATCH_8_GROUPS,
},
},
};
#[cfg(test)]
use miden_core::mast::OP_GROUP_SIZE;
use miden_core::{
AssemblyOp, FMP_ADDR, FMP_INIT_VALUE,
mast::{
BasicBlockNode, CallNode, DynNode, JoinNode, LoopNode, MastForest, MastNodeExt,
OP_BATCH_SIZE, SplitNode,
},
stack::MIN_STACK_DEPTH,
};
use super::{
EMPTY_WORD, ExecutionError, Felt, MIN_TRACE_LEN, ONE, OpBatch, Operation, Process, Word, ZERO,
};
use crate::{SyncHost, errors::ErrorContext};
mod trace;
use trace::DecoderTrace;
mod aux_trace;
pub use aux_trace::AuxTraceBuilder;
#[cfg(test)]
pub use aux_trace::BlockHashTableRow;
pub mod block_stack;
use block_stack::{BlockStack, BlockType, ExecutionContextInfo};
#[cfg(test)]
use miden_air::trace::decoder::NUM_USER_OP_HELPERS;
#[cfg(test)]
mod tests;
/// Number of rows in one hash cycle of the hash chiplet, lifted into a field element so it can
/// be added directly to block addresses in the decoder trace (see `Decoder::respan`).
const HASH_CYCLE_LEN: Felt = Felt::new(miden_air::trace::chiplets::hasher::HASH_CYCLE_LEN as u64);
impl Process {
    // JOIN NODE
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a JOIN node.
    ///
    /// Looks up the digests of both children in `program`, requests hashing of the control block
    /// in the hash chiplet, records the JOIN start in the decoder trace, and advances the clock
    /// by executing a Noop.
    ///
    /// # Errors
    /// Returns an error if either child node cannot be found in `program`.
    pub(super) fn start_join_node<H: SyncHost>(
        &mut self,
        node: &JoinNode,
        program: &MastForest,
        host: &mut H,
    ) -> Result<(), ExecutionError> {
        let child1_hash = program
            .get_node_by_id(node.first())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: node.first() })?
            .digest();
        let child2_hash = program
            .get_node_by_id(node.second())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: node.second() })?
            .digest();
        // `addr` is the address of the hasher trace row at which this block's hash computation
        // starts; it serves as the block's ID in the decoder trace.
        let (addr, hashed_block) = self.chiplets.hasher.hash_control_block(
            child1_hash,
            child2_hash,
            JoinNode::DOMAIN,
            node.digest(),
        );
        // The hash computed by the chiplet must agree with the digest stored in the MAST node.
        debug_assert_eq!(node.digest(), hashed_block);
        self.decoder.start_join(child1_hash, child2_hash, addr);
        self.execute_op(Operation::Noop, program, host)
    }

    /// Ends decoding of a JOIN node: records the block end in the decoder trace and advances the
    /// clock by executing a Noop.
    pub(super) fn end_join_node<H: SyncHost>(
        &mut self,
        node: &JoinNode,
        program: &MastForest,
        host: &mut H,
    ) -> Result<(), ExecutionError> {
        self.decoder.end_control_block(node.digest());
        self.execute_op(Operation::Noop, program, host)
    }

    // SPLIT NODE
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a SPLIT node and returns the branch-selector value.
    ///
    /// The selector is read from the top of the stack *before* it is removed by the Drop
    /// operation executed at the end of this method; the caller uses the returned value to pick
    /// which branch to execute.
    ///
    /// # Errors
    /// Returns an error if either branch node cannot be found in `program`.
    pub(super) fn start_split_node<H: SyncHost>(
        &mut self,
        node: &SplitNode,
        program: &MastForest,
        host: &mut H,
    ) -> Result<Felt, ExecutionError> {
        let condition = self.stack.peek();
        let child1_hash = program
            .get_node_by_id(node.on_true())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: node.on_true() })?
            .digest();
        let child2_hash = program
            .get_node_by_id(node.on_false())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: node.on_false() })?
            .digest();
        let (addr, hashed_block) = self.chiplets.hasher.hash_control_block(
            child1_hash,
            child2_hash,
            SplitNode::DOMAIN,
            node.digest(),
        );
        debug_assert_eq!(node.digest(), hashed_block);
        self.decoder.start_split(child1_hash, child2_hash, addr);
        // Remove the selector from the stack; the clock advances as a side effect.
        self.execute_op(Operation::Drop, program, host)?;
        Ok(condition)
    }

    /// Ends decoding of a SPLIT node: records the block end in the decoder trace and advances the
    /// clock by executing a Noop.
    pub(super) fn end_split_node<H: SyncHost>(
        &mut self,
        block: &SplitNode,
        program: &MastForest,
        host: &mut H,
    ) -> Result<(), ExecutionError> {
        self.decoder.end_control_block(block.digest());
        self.execute_op(Operation::Noop, program, host)
    }

    // LOOP NODE
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a LOOP node and returns the loop-entry condition.
    ///
    /// The condition is read from the top of the stack *before* it is removed by the Drop
    /// operation executed at the end of this method; the decoder is told whether the loop body
    /// will be entered so it can record the appropriate trace rows.
    ///
    /// # Errors
    /// Returns an error if the loop body node cannot be found in `program`.
    pub(super) fn start_loop_node<H: SyncHost>(
        &mut self,
        node: &LoopNode,
        program: &MastForest,
        host: &mut H,
    ) -> Result<Felt, ExecutionError> {
        let condition = self.stack.peek();
        let body_hash = program
            .get_node_by_id(node.body())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: node.body() })?
            .digest();
        // A LOOP block hashes only its body; the second word of the hasher input is empty.
        let (addr, hashed_block) = self.chiplets.hasher.hash_control_block(
            body_hash,
            EMPTY_WORD,
            LoopNode::DOMAIN,
            node.digest(),
        );
        debug_assert_eq!(node.digest(), hashed_block);
        self.decoder.start_loop(body_hash, addr, condition);
        self.execute_op(Operation::Drop, program, host)?;
        Ok(condition)
    }

    /// Ends decoding of a LOOP node.
    ///
    /// When `pop_stack` is true (i.e. the loop body was entered at least once), the ZERO which
    /// terminated the loop is still sitting on top of the stack and is removed with a Drop;
    /// otherwise the clock is advanced with a Noop.
    pub(super) fn end_loop_node<H: SyncHost>(
        &mut self,
        node: &LoopNode,
        pop_stack: bool,
        program: &MastForest,
        host: &mut H,
    ) -> Result<(), ExecutionError> {
        self.decoder.end_control_block(node.digest());
        if pop_stack {
            // NOTE(review): this cfg guard is redundant — debug_assert_eq! already compiles to
            // nothing in release builds.
            #[cfg(debug_assertions)]
            debug_assert_eq!(ZERO, self.stack.peek());
            self.execute_op(Operation::Drop, program, host)
        } else {
            self.execute_op(Operation::Noop, program, host)
        }
    }

    // CALL / SYSCALL NODE
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a CALL or SYSCALL node.
    ///
    /// Saves the current execution context (context ID, function hash, stack depth, overflow
    /// address) so it can be restored when the call returns. For a regular call, the fmp value
    /// of the new context is initialized in memory; for a syscall no such write happens here.
    ///
    /// # Errors
    /// Returns an error if the callee node cannot be found in `program`, or if the memory write
    /// fails.
    pub(super) fn start_call_node<H: SyncHost>(
        &mut self,
        node: &CallNode,
        program: &MastForest,
        host: &mut H,
        err_ctx: &impl ErrorContext,
    ) -> Result<(), ExecutionError> {
        let callee_hash = program
            .get_node_by_id(node.callee())
            .ok_or(ExecutionError::MastNodeNotFoundInForest { node_id: node.callee() })?
            .digest();
        let (addr, hashed_block) = self.chiplets.hasher.hash_control_block(
            callee_hash,
            EMPTY_WORD,
            node.domain(),
            node.digest(),
        );
        debug_assert_eq!(node.digest(), hashed_block);
        // Capture the caller's stack state; the stack is truncated to the new context's view.
        let (stack_depth, next_overflow_addr) = self.stack.start_context();
        debug_assert!(stack_depth <= u32::MAX as usize, "stack depth too big");
        let ctx_info = ExecutionContextInfo::new(
            self.system.ctx(),
            self.system.fn_hash(),
            stack_depth as u32,
            next_overflow_addr,
        );
        if node.is_syscall() {
            self.system.start_syscall();
            self.decoder.start_syscall(callee_hash, addr, ctx_info);
        } else {
            self.system.start_call_or_dyncall(callee_hash);
            self.decoder.start_call(callee_hash, addr, ctx_info);
            // Initialize the free-memory pointer of the newly created context.
            self.chiplets
                .memory
                .write(
                    self.system.get_next_ctx_id(),
                    FMP_ADDR,
                    self.system.clk(),
                    FMP_INIT_VALUE,
                    err_ctx,
                )
                .map_err(ExecutionError::MemoryError)?;
        }
        self.execute_op(Operation::Noop, program, host)
    }

    /// Ends decoding of a CALL or SYSCALL node and restores the caller's execution context.
    ///
    /// # Errors
    /// Returns an error if the stack is deeper than the minimum depth at the time of return —
    /// the callee must fully consume any overflow it created.
    pub(super) fn end_call_node<H: SyncHost>(
        &mut self,
        node: &CallNode,
        program: &MastForest,
        host: &mut H,
        err_ctx: &impl ErrorContext,
    ) -> Result<(), ExecutionError> {
        // When a call returns, the stack must be back at its minimum depth.
        let stack_depth = self.stack.depth();
        if stack_depth > MIN_STACK_DEPTH {
            return Err(ExecutionError::invalid_stack_depth_on_return(stack_depth, err_ctx));
        }
        // A CALL/SYSCALL block always carries a saved execution context; its absence is a bug.
        let ctx_info = self.decoder.end_control_block(node.digest()).expect("no execution context");
        self.system.restore_context(ctx_info.parent_ctx, ctx_info.parent_fn_hash);
        self.stack.restore_context(ctx_info.parent_stack_depth as usize);
        self.execute_op(Operation::Noop, program, host)
    }

    // DYN / DYNCALL NODE
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a DYN node and returns the callee hash.
    ///
    /// The callee hash is read from memory at the address found on top of the stack; the address
    /// is then removed by the Drop operation executed at the end of this method.
    ///
    /// # Errors
    /// Returns an error if the memory read fails.
    pub(super) fn start_dyn_node<H: SyncHost>(
        &mut self,
        dyn_node: &DynNode,
        program: &MastForest,
        host: &mut H,
        err_ctx: &impl ErrorContext,
    ) -> Result<Word, ExecutionError> {
        debug_assert!(!dyn_node.is_dyncall());
        let mem_addr = self.stack.get(0);
        let callee_hash = self
            .chiplets
            .memory
            .read_word(self.system.ctx(), mem_addr, self.system.clk(), err_ctx)
            .map_err(ExecutionError::MemoryError)?;
        // DYN blocks hash two empty words; only the domain distinguishes them.
        let (addr, hashed_block) = self.chiplets.hasher.hash_control_block(
            EMPTY_WORD,
            EMPTY_WORD,
            dyn_node.domain(),
            dyn_node.digest(),
        );
        debug_assert_eq!(dyn_node.digest(), hashed_block);
        self.decoder.start_dyn(addr, callee_hash);
        self.execute_op(Operation::Drop, program, host)?;
        Ok(callee_hash)
    }

    /// Starts decoding of a DYNCALL node and returns the callee hash.
    ///
    /// Combines the behavior of a DYN start (callee hash read from the memory address on top of
    /// the stack) with a CALL start (execution-context save and fmp initialization). Unlike the
    /// other `start_*` methods, this one does not go through `execute_op`, so trace capacity and
    /// the clock are managed explicitly here.
    ///
    /// # Errors
    /// Returns an error if the memory read/write fails or the clock cannot be advanced.
    pub(super) fn start_dyncall_node(
        &mut self,
        dyn_node: &DynNode,
        err_ctx: &impl ErrorContext,
    ) -> Result<Word, ExecutionError> {
        debug_assert!(dyn_node.is_dyncall());
        let mem_addr = self.stack.get(0);
        let callee_hash = self
            .chiplets
            .memory
            .read_word(self.system.ctx(), mem_addr, self.system.clk(), err_ctx)
            .map_err(ExecutionError::MemoryError)?;
        // Initialize the free-memory pointer of the newly created context.
        self.chiplets
            .memory
            .write(
                self.system.get_next_ctx_id(),
                FMP_ADDR,
                self.system.clk(),
                FMP_INIT_VALUE,
                err_ctx,
            )
            .map_err(ExecutionError::MemoryError)?;
        // Normally done inside execute_op; must be done manually here.
        self.ensure_trace_capacity();
        let (addr, hashed_block) = self.chiplets.hasher.hash_control_block(
            EMPTY_WORD,
            EMPTY_WORD,
            dyn_node.domain(),
            dyn_node.digest(),
        );
        debug_assert_eq!(dyn_node.digest(), hashed_block);
        // Drop the memory address and save the caller's stack state in a single step.
        let (stack_depth, next_overflow_addr) = self.stack.shift_left_and_start_context();
        debug_assert!(stack_depth <= u32::MAX as usize, "stack depth too big");
        let ctx_info = ExecutionContextInfo::new(
            self.system.ctx(),
            self.system.fn_hash(),
            stack_depth as u32,
            next_overflow_addr,
        );
        self.system.start_call_or_dyncall(callee_hash);
        self.decoder.start_dyncall(addr, callee_hash, ctx_info);
        self.advance_clock()?;
        Ok(callee_hash)
    }

    /// Ends decoding of a DYN node: records the block end in the decoder trace and advances the
    /// clock by executing a Noop.
    pub(super) fn end_dyn_node<H: SyncHost>(
        &mut self,
        dyn_node: &DynNode,
        program: &MastForest,
        host: &mut H,
    ) -> Result<(), ExecutionError> {
        self.decoder.end_control_block(dyn_node.digest());
        self.execute_op(Operation::Noop, program, host)
    }

    /// Ends decoding of a DYNCALL node and restores the caller's execution context.
    ///
    /// # Errors
    /// Returns an error if the stack is deeper than the minimum depth at the time of return.
    pub(super) fn end_dyncall_node<H: SyncHost>(
        &mut self,
        dyn_node: &DynNode,
        program: &MastForest,
        host: &mut H,
        err_ctx: &impl ErrorContext,
    ) -> Result<(), ExecutionError> {
        // When a dyncall returns, the stack must be back at its minimum depth.
        let stack_depth = self.stack.depth();
        if stack_depth > MIN_STACK_DEPTH {
            return Err(ExecutionError::invalid_stack_depth_on_return(stack_depth, err_ctx));
        }
        // A DYNCALL block always carries a saved execution context; its absence is a bug.
        let ctx_info =
            self.decoder.end_control_block(dyn_node.digest()).expect("no execution context");
        self.system.restore_context(ctx_info.parent_ctx, ctx_info.parent_fn_hash);
        self.stack.restore_context(ctx_info.parent_stack_depth as usize);
        self.execute_op(Operation::Noop, program, host)
    }

    // BASIC BLOCK NODE
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a basic block.
    ///
    /// Requests hashing of all op batches in the hash chiplet, then hands the first batch and
    /// the total op-group count to the decoder. The clock is advanced with a Noop.
    pub(super) fn start_basic_block_node<H: SyncHost>(
        &mut self,
        basic_block: &BasicBlockNode,
        program: &MastForest,
        host: &mut H,
    ) -> Result<(), ExecutionError> {
        let op_batches = basic_block.op_batches();
        let (addr, hashed_block) =
            self.chiplets.hasher.hash_basic_block(op_batches, basic_block.digest());
        debug_assert_eq!(basic_block.digest(), hashed_block);
        let num_op_groups = basic_block.num_op_groups();
        self.decoder
            .start_basic_block(&op_batches[0], Felt::new(num_op_groups as u64), addr);
        self.execute_op(Operation::Noop, program, host)
    }

    /// Ends decoding of a basic block: records the span end in the decoder trace and advances
    /// the clock by executing a Noop.
    pub(super) fn end_basic_block_node<H: SyncHost>(
        &mut self,
        block: &BasicBlockNode,
        program: &MastForest,
        host: &mut H,
    ) -> Result<(), ExecutionError> {
        self.decoder.end_basic_block(block.digest());
        self.execute_op(Operation::Noop, program, host)
    }

    /// Continues decoding a basic block with the next operation batch (RESPAN).
    pub(super) fn respan(&mut self, op_batch: &OpBatch) {
        self.decoder.respan(op_batch);
    }
}
/// Program decoder for the processor.
///
/// Builds the main columns of the decoder execution trace as MAST nodes are started/ended and
/// user operations are executed.
pub struct Decoder {
    /// Stack of blocks (control blocks and basic blocks) currently being executed.
    block_stack: BlockStack,
    /// Decoding state of the basic block currently being executed, if any.
    basic_block_context: Option<BasicBlockContext>,
    /// Main trace columns built by the decoder.
    trace: DecoderTrace,
    /// Operation / assembly-op history collected for debugging.
    debug_info: DebugInfo,
}
impl Decoder {
    // CONSTRUCTOR
    // --------------------------------------------------------------------------------------------

    /// Returns an empty decoder; `in_debug_mode` controls whether executed operations are
    /// recorded in [DebugInfo].
    pub fn new(in_debug_mode: bool) -> Self {
        Self {
            block_stack: BlockStack::default(),
            basic_block_context: None,
            trace: DecoderTrace::new(),
            debug_info: DebugInfo::new(in_debug_mode),
        }
    }

    // PUBLIC ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns the current length of the decoder trace.
    pub fn trace_len(&self) -> usize {
        self.trace.trace_len()
    }

    /// Returns the program hash recorded in the decoder trace.
    pub fn program_hash(&self) -> [Felt; DIGEST_LEN] {
        self.trace.program_hash()
    }

    /// Returns the collected debug info; must only be called in debug mode.
    pub fn debug_info(&self) -> &DebugInfo {
        debug_assert!(self.in_debug_mode());
        &self.debug_info
    }

    /// Returns true if the decoder records executed operations.
    pub fn in_debug_mode(&self) -> bool {
        self.debug_info.in_debug_mode()
    }

    // CONTROL BLOCKS
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a JOIN block: pushes it onto the block stack and appends a JOIN start
    /// row (carrying both child hashes) to the trace.
    pub fn start_join(&mut self, child1_hash: Word, child2_hash: Word, addr: Felt) {
        let parent_addr = self.block_stack.push(addr, BlockType::Join(false), None);
        self.trace
            .append_block_start(parent_addr, Operation::Join, child1_hash, child2_hash);
        self.debug_info.append_operation(Operation::Join);
    }

    /// Starts decoding of a SPLIT block: pushes it onto the block stack and appends a SPLIT
    /// start row (carrying both branch hashes) to the trace.
    pub fn start_split(&mut self, child1_hash: Word, child2_hash: Word, addr: Felt) {
        let parent_addr = self.block_stack.push(addr, BlockType::Split, None);
        self.trace
            .append_block_start(parent_addr, Operation::Split, child1_hash, child2_hash);
        self.debug_info.append_operation(Operation::Split);
    }

    /// Starts decoding of a LOOP block; `stack_top` determines whether the loop body will be
    /// entered (the body executes only when the value is ONE).
    pub fn start_loop(&mut self, loop_body_hash: Word, addr: Felt, stack_top: Felt) {
        let enter_loop = stack_top == ONE;
        let parent_addr = self.block_stack.push(addr, BlockType::Loop(enter_loop), None);
        self.trace
            .append_block_start(parent_addr, Operation::Loop, loop_body_hash, EMPTY_WORD);
        self.debug_info.append_operation(Operation::Loop);
    }

    /// Appends a REPEAT row for another iteration of the loop currently on top of the block
    /// stack; that block must be an entered loop.
    pub fn repeat(&mut self) {
        let block_info = self.block_stack.peek();
        debug_assert_eq!(ONE, block_info.is_entered_loop());
        self.trace.append_loop_repeat(block_info.addr);
        self.debug_info.append_operation(Operation::Repeat);
    }

    /// Starts decoding of a CALL block; `ctx_info` saves the caller's execution context so it
    /// can be restored when the block ends.
    pub fn start_call(&mut self, fn_hash: Word, addr: Felt, ctx_info: ExecutionContextInfo) {
        let parent_addr = self.block_stack.push(addr, BlockType::Call, Some(ctx_info));
        self.trace.append_block_start(parent_addr, Operation::Call, fn_hash, EMPTY_WORD);
        self.debug_info.append_operation(Operation::Call);
    }

    /// Starts decoding of a SYSCALL block; `ctx_info` saves the caller's execution context so it
    /// can be restored when the block ends.
    pub fn start_syscall(&mut self, fn_hash: Word, addr: Felt, ctx_info: ExecutionContextInfo) {
        let parent_addr = self.block_stack.push(addr, BlockType::SysCall, Some(ctx_info));
        self.trace
            .append_block_start(parent_addr, Operation::SysCall, fn_hash, EMPTY_WORD);
        self.debug_info.append_operation(Operation::SysCall);
    }

    /// Starts decoding of a DYN block with the callee hash that was read from memory.
    pub fn start_dyn(&mut self, addr: Felt, callee_hash: Word) {
        let parent_addr = self.block_stack.push(addr, BlockType::Dyn, None);
        self.trace
            .append_block_start(parent_addr, Operation::Dyn, callee_hash, [ZERO; 4].into());
        self.debug_info.append_operation(Operation::Dyn);
    }

    /// Starts decoding of a DYNCALL block.
    ///
    /// Unlike the other block starts, the second hasher word of the start row carries the
    /// caller's stack depth and next overflow address (extracted from `ctx_info` before it is
    /// moved onto the block stack).
    pub fn start_dyncall(&mut self, addr: Felt, callee_hash: Word, ctx_info: ExecutionContextInfo) {
        let parent_stack_depth = ctx_info.parent_stack_depth.into();
        let parent_next_overflow_addr = ctx_info.parent_next_overflow_addr;
        let parent_addr = self.block_stack.push(addr, BlockType::Dyncall, Some(ctx_info));
        self.trace.append_block_start(
            parent_addr,
            Operation::Dyncall,
            callee_hash,
            [parent_stack_depth, parent_next_overflow_addr, ZERO, ZERO].into(),
        );
        self.debug_info.append_operation(Operation::Dyncall);
    }

    /// Ends decoding of a control block: pops it from the block stack, appends an END row, and
    /// returns the saved execution context if the block carried one (CALL/SYSCALL/DYNCALL).
    pub fn end_control_block(&mut self, block_hash: Word) -> Option<ExecutionContextInfo> {
        let block_info = self.block_stack.pop();
        self.trace.append_block_end(
            block_info.addr,
            block_hash,
            block_info.is_loop_body(),
            block_info.is_entered_loop(),
            block_info.is_call(),
            block_info.is_syscall(),
        );
        self.debug_info.append_operation(Operation::End);
        block_info.ctx_info
    }

    // BASIC BLOCKS
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a basic block (SPAN) defined by its first op batch and the total
    /// number of op groups in the block.
    pub fn start_basic_block(&mut self, first_op_batch: &OpBatch, num_op_groups: Felt, addr: Felt) {
        debug_assert!(self.basic_block_context.is_none(), "already in basic block");
        let parent_addr = self.block_stack.push(addr, BlockType::BasicBlock, None);
        self.trace
            .append_span_start(parent_addr, first_op_batch.groups(), num_op_groups);
        self.basic_block_context = Some(BasicBlockContext {
            // The first group starts executing immediately, so it is not counted as "left".
            num_groups_left: num_op_groups - ONE,
            group_ops_left: first_op_batch.groups()[0],
        });
        self.debug_info.append_operation(Operation::Span);
    }

    /// Continues decoding a basic block with the next op batch (RESPAN).
    pub fn respan(&mut self, op_batch: &OpBatch) {
        self.trace.append_respan(op_batch.groups());
        let block_info = self.block_stack.peek_mut();
        // Each additional batch advances the block's hasher address by one full hash cycle.
        block_info.addr += HASH_CYCLE_LEN;
        let ctx = self.basic_block_context.as_mut().expect("not in basic block");
        ctx.num_groups_left -= ONE;
        ctx.group_ops_left = op_batch.groups()[0];
        self.debug_info.append_operation(Operation::Respan);
    }

    /// Starts executing a new op group; the previous group must be fully consumed.
    pub fn start_op_group(&mut self, op_group: Felt) {
        let ctx = self.basic_block_context.as_mut().expect("not in basic block");
        debug_assert_eq!(ZERO, ctx.group_ops_left, "not all ops executed in current group");
        ctx.group_ops_left = op_group;
        ctx.num_groups_left -= ONE;
    }

    /// Records execution of a user operation at position `op_idx` within its op group: removes
    /// the opcode from the current group and appends a row to the trace.
    pub fn execute_user_op(&mut self, op: Operation, op_idx: usize) {
        let block = self.block_stack.peek();
        let ctx = self.basic_block_context.as_mut().expect("not in basic block");
        ctx.group_ops_left = remove_opcode_from_group(ctx.group_ops_left, op);
        self.trace.append_user_op(
            op,
            block.addr,
            block.parent_addr,
            ctx.num_groups_left,
            ctx.group_ops_left,
            Felt::from(op_idx as u32),
        );
        // An operation with an immediate value consumes an extra op group (the immediate
        // occupies its own group).
        if op.imm_value().is_some() {
            ctx.num_groups_left -= ONE;
        }
        self.debug_info.append_operation(op);
    }

    /// Writes helper-register values for the last executed user operation into the trace; the
    /// op must not already use the decoder hasher registers for its own data.
    pub fn set_user_op_helpers(&mut self, op: Operation, values: &[Felt]) {
        debug_assert!(
            !op.populates_decoder_hasher_registers(),
            "user op helper registers not available for op"
        );
        self.trace.set_user_op_helpers(values);
    }

    /// Ends decoding of a basic block: pops it from the block stack, appends a span END row,
    /// and clears the basic-block context.
    pub fn end_basic_block(&mut self, block_hash: Word) {
        let block_info = self.block_stack.pop();
        self.trace.append_span_end(block_hash, block_info.is_loop_body());
        self.basic_block_context = None;
        self.debug_info.append_operation(Operation::End);
    }

    // TRACE GENERATION
    // --------------------------------------------------------------------------------------------

    /// Consumes the decoder and returns its trace columns, padded to `trace_len` rows with
    /// `num_rand_rows` of them reserved for random values.
    pub fn into_trace(self, trace_len: usize, num_rand_rows: usize) -> super::DecoderTrace {
        let trace = self
            .trace
            .into_vec(trace_len, num_rand_rows)
            .try_into()
            .expect("failed to convert vector to array");
        let aux_builder = AuxTraceBuilder::default();
        super::DecoderTrace { trace, aux_builder }
    }

    /// Records an assembly op at clock cycle `clk` for debugging.
    pub fn append_asmop(&mut self, clk: RowIndex, asmop: AssemblyOp) {
        self.debug_info.append_asmop(clk, asmop);
    }

    // TEST HELPERS
    // --------------------------------------------------------------------------------------------

    /// Appends a dummy row to the decoder trace (test-only).
    #[cfg(test)]
    pub fn add_dummy_trace_row(&mut self) {
        self.trace.add_dummy_row();
    }

    /// Returns the helper-register values of the last trace row (test-only).
    #[cfg(test)]
    pub fn get_user_op_helpers(&self) -> [Felt; NUM_USER_OP_HELPERS] {
        self.trace.get_user_op_helpers()
    }
}
impl Default for Decoder {
fn default() -> Self {
Self::new(false)
}
}
/// Keeps track of decoding progress within the basic block currently being executed.
#[derive(Default)]
pub(crate) struct BasicBlockContext {
    /// Not-yet-executed operations of the current op group, packed as opcodes into a single
    /// field element (see `remove_opcode_from_group`).
    pub group_ops_left: Felt,
    /// Number of op groups in the basic block that have not yet started executing.
    pub num_groups_left: Felt,
}
/// Removes the specified operation from the op group and returns the resulting group.
///
/// The opcode of `op` must occupy the least significant `NUM_OP_BITS` bits of `op_group`;
/// subtracting it and shifting right moves the next opcode into the least significant position.
fn remove_opcode_from_group(op_group: Felt, op: Operation) -> Felt {
    let opcode = op.op_code() as u64;
    // Verify the invariant *before* subtracting. The original assertion ran after the
    // subtraction — where a mismatched opcode would already have caused a u64 underflow panic
    // in debug builds — and was compiled out entirely in release builds, so it could never fire.
    debug_assert_eq!(
        op_group.as_int() & ((1u64 << NUM_OP_BITS) - 1),
        opcode,
        "opcode does not match the least significant bits of the op group"
    );
    Felt::new((op_group.as_int() - opcode) >> NUM_OP_BITS)
}
/// Returns the number of op groups the next batch will contain: all remaining groups, capped at
/// the maximum batch size.
fn get_num_groups_in_next_batch(num_groups_left: Felt) -> usize {
    let remaining = num_groups_left.as_int() as usize;
    remaining.min(OP_BATCH_SIZE)
}
/// Builds an op group from the specified list of operations (test-only).
///
/// The operations are packed into a single field element, with the first operation occupying
/// the least significant bits.
///
/// # Panics
/// Panics if more than `OP_GROUP_SIZE` operations are provided.
#[cfg(test)]
pub fn build_op_group(ops: &[Operation]) -> Felt {
    // Check the bound up front: with too many ops the shift below would overflow before the
    // original post-loop assertion could ever fire.
    assert!(ops.len() <= OP_GROUP_SIZE, "too many ops");
    let group = ops
        .iter()
        .enumerate()
        .fold(0u64, |acc, (i, op)| acc | ((op.op_code() as u64) << (Operation::OP_BITS * i)));
    Felt::new(group)
}
/// Operation and assembly-op information gathered during execution for debugging purposes.
pub struct DebugInfo {
    /// Whether executed operations are recorded.
    in_debug_mode: bool,
    /// Operations executed so far; populated only when `in_debug_mode` is true.
    operations: Vec<Operation>,
    /// Assembly ops with the clock cycle at which each was recorded; note these are recorded
    /// regardless of debug mode (see `append_asmop`).
    assembly_ops: Vec<(usize, AssemblyOp)>,
}
impl DebugInfo {
    /// Returns a new [DebugInfo] with empty operation and assembly-op lists.
    pub fn new(in_debug_mode: bool) -> Self {
        Self {
            in_debug_mode,
            operations: Vec::new(),
            assembly_ops: Vec::new(),
        }
    }

    /// Returns true if operations are being recorded.
    #[inline(always)]
    pub fn in_debug_mode(&self) -> bool {
        self.in_debug_mode
    }

    /// Returns the operations recorded so far.
    pub fn operations(&self) -> &[Operation] {
        self.operations.as_slice()
    }

    /// Returns the recorded assembly ops together with their clock cycles.
    pub fn assembly_ops(&self) -> &[(usize, AssemblyOp)] {
        self.assembly_ops.as_slice()
    }

    /// Records `op` when debug mode is enabled; a no-op otherwise.
    #[inline(always)]
    pub fn append_operation(&mut self, op: Operation) {
        if !self.in_debug_mode {
            return;
        }
        self.operations.push(op);
    }

    /// Records an assembly op at clock cycle `clk`; recorded regardless of debug mode.
    pub fn append_asmop(&mut self, clk: RowIndex, asmop: AssemblyOp) {
        let clk: usize = clk.into();
        self.assembly_ops.push((clk, asmop));
    }
}