use super::{
Call, Dyn, ExecutionError, Felt, Host, Join, Loop, OpBatch, Operation, Process, Span, Split,
Word, EMPTY_WORD, MIN_TRACE_LEN, ONE, OP_BATCH_SIZE, ZERO,
};
use alloc::vec::Vec;
use miden_air::trace::{
chiplets::hasher::DIGEST_LEN,
decoder::{
NUM_HASHER_COLUMNS, NUM_OP_BATCH_FLAGS, NUM_OP_BITS, NUM_OP_BITS_EXTRA_COLS,
OP_BATCH_1_GROUPS, OP_BATCH_2_GROUPS, OP_BATCH_4_GROUPS, OP_BATCH_8_GROUPS,
},
};
use vm_core::{code_blocks::get_span_op_group_count, stack::STACK_TOP_SIZE, AssemblyOp};
mod trace;
use trace::DecoderTrace;
mod aux_trace;
pub use aux_trace::AuxTraceBuilder;
mod block_stack;
use block_stack::{BlockStack, BlockType, ExecutionContextInfo};
#[cfg(test)]
use miden_air::trace::decoder::NUM_USER_OP_HELPERS;
#[cfg(test)]
mod tests;
/// Length of a single hash cycle, expressed as a field element; used to advance a span block's
/// address by one hasher cycle on RESPAN (see `Decoder::respan`).
const HASH_CYCLE_LEN: Felt = Felt::new(miden_air::trace::chiplets::hasher::HASH_CYCLE_LEN as u64);
impl<H> Process<H>
where
    H: Host,
{
    // JOIN BLOCK
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a JOIN block.
    ///
    /// Hashes the block via the hasher chiplet (the returned row address becomes the block's ID),
    /// records the block start in the decoder, and executes a Noop to advance the clock cycle.
    pub(super) fn start_join_block(&mut self, block: &Join) -> Result<(), ExecutionError> {
        let child1_hash = block.first().hash().into();
        let child2_hash = block.second().hash().into();
        // the row address returned by the hasher becomes the ID of the block
        let addr =
            self.chiplets
                .hash_control_block(child1_hash, child2_hash, Join::DOMAIN, block.hash());
        self.decoder.start_join(child1_hash, child2_hash, addr);
        self.execute_op(Operation::Noop)
    }

    /// Ends decoding of a JOIN block.
    pub(super) fn end_join_block(&mut self, block: &Join) -> Result<(), ExecutionError> {
        self.decoder.end_control_block(block.hash().into());
        self.execute_op(Operation::Noop)
    }

    // SPLIT BLOCK
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a SPLIT block and returns the condition read from the top of the stack.
    ///
    /// The condition is peeked before the Drop which consumes it, so the caller can decide which
    /// branch to execute.
    pub(super) fn start_split_block(&mut self, block: &Split) -> Result<Felt, ExecutionError> {
        let condition = self.stack.peek();
        let child1_hash = block.on_true().hash().into();
        let child2_hash = block.on_false().hash().into();
        // the row address returned by the hasher becomes the ID of the block
        let addr =
            self.chiplets
                .hash_control_block(child1_hash, child2_hash, Split::DOMAIN, block.hash());
        self.decoder.start_split(child1_hash, child2_hash, addr);
        // drop the condition from the stack
        self.execute_op(Operation::Drop)?;
        Ok(condition)
    }

    /// Ends decoding of a SPLIT block.
    pub(super) fn end_split_block(&mut self, block: &Split) -> Result<(), ExecutionError> {
        self.decoder.end_control_block(block.hash().into());
        self.execute_op(Operation::Noop)
    }

    // LOOP BLOCK
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a LOOP block and returns the condition read from the top of the stack.
    pub(super) fn start_loop_block(&mut self, block: &Loop) -> Result<Felt, ExecutionError> {
        let condition = self.stack.peek();
        let body_hash = block.body().hash().into();
        // the row address returned by the hasher becomes the ID of the block
        let addr =
            self.chiplets
                .hash_control_block(body_hash, EMPTY_WORD, Loop::DOMAIN, block.hash());
        self.decoder.start_loop(body_hash, addr, condition);
        // drop the condition from the stack
        self.execute_op(Operation::Drop)?;
        Ok(condition)
    }

    /// Ends decoding of a LOOP block.
    ///
    /// If `pop_stack` is true, the loop was entered at least once, and the exit condition
    /// (which must be ZERO) is dropped from the stack; otherwise a Noop is executed.
    pub(super) fn end_loop_block(
        &mut self,
        block: &Loop,
        pop_stack: bool,
    ) -> Result<(), ExecutionError> {
        self.decoder.end_control_block(block.hash().into());
        if pop_stack {
            // `debug_assert_eq!` is already compiled out in release builds, so the previous
            // `#[cfg(debug_assertions)]` attribute on it was redundant and has been removed.
            debug_assert_eq!(ZERO, self.stack.peek());
            self.execute_op(Operation::Drop)
        } else {
            self.execute_op(Operation::Noop)
        }
    }

    // CALL / SYSCALL BLOCK
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a CALL (or SYSCALL) block.
    ///
    /// Saves the current execution context (ctx, fn hash, fmp, stack depth, overflow address)
    /// so it can be restored when the call returns, then starts a new context.
    pub(super) fn start_call_block(&mut self, block: &Call) -> Result<(), ExecutionError> {
        let fn_hash = block.fn_hash().into();
        // the row address returned by the hasher becomes the ID of the block
        let addr =
            self.chiplets
                .hash_control_block(fn_hash, EMPTY_WORD, block.domain(), block.hash());
        // start a new execution context for the stack; capture its state for later restoration
        let (stack_depth, next_overflow_addr) = self.stack.start_context();
        debug_assert!(stack_depth <= u32::MAX as usize, "stack depth too big");
        let ctx_info = ExecutionContextInfo::new(
            self.system.ctx(),
            self.system.fn_hash(),
            self.system.fmp(),
            stack_depth as u32,
            next_overflow_addr,
        );
        if block.is_syscall() {
            self.system.start_syscall();
            self.decoder.start_syscall(fn_hash, addr, ctx_info);
        } else {
            self.system.start_call(fn_hash);
            self.decoder.start_call(fn_hash, addr, ctx_info);
        }
        self.execute_op(Operation::Noop)
    }

    /// Ends decoding of a CALL (or SYSCALL) block and restores the parent execution context.
    ///
    /// # Errors
    /// Returns `InvalidStackDepthOnReturn` if the stack depth at the end of the call exceeds
    /// the top stack size (i.e. values were left in the overflow table).
    pub(super) fn end_call_block(&mut self, block: &Call) -> Result<(), ExecutionError> {
        let stack_depth = self.stack.depth();
        if stack_depth > STACK_TOP_SIZE {
            return Err(ExecutionError::InvalidStackDepthOnReturn(stack_depth));
        }
        // the decoder returns the context info saved in start_call_block
        let ctx_info = self
            .decoder
            .end_control_block(block.hash().into())
            .expect("no execution context");
        self.system.restore_context(
            ctx_info.parent_ctx,
            ctx_info.parent_fmp,
            ctx_info.parent_fn_hash,
        );
        self.stack.restore_context(
            ctx_info.parent_stack_depth as usize,
            ctx_info.parent_next_overflow_addr,
        );
        self.execute_op(Operation::Noop)
    }

    // DYN BLOCK
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a DYN block.
    ///
    /// The block itself is hashed over empty words; the actual callee hash (`dyn_hash`) is
    /// provided at run time and recorded in the decoder trace.
    pub(super) fn start_dyn_block(
        &mut self,
        block: &Dyn,
        dyn_hash: Word,
    ) -> Result<(), ExecutionError> {
        let addr =
            self.chiplets
                .hash_control_block(EMPTY_WORD, EMPTY_WORD, Dyn::DOMAIN, block.hash());
        self.decoder.start_dyn(dyn_hash, addr);
        self.execute_op(Operation::Noop)
    }

    /// Ends decoding of a DYN block.
    pub(super) fn end_dyn_block(&mut self, block: &Dyn) -> Result<(), ExecutionError> {
        self.decoder.end_control_block(block.hash().into());
        self.execute_op(Operation::Noop)
    }

    // SPAN BLOCK
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a SPAN block, seeding the decoder with the first op batch and the
    /// total number of op groups in the span.
    pub(super) fn start_span_block(&mut self, block: &Span) -> Result<(), ExecutionError> {
        let op_batches = block.op_batches();
        // the row address returned by the hasher becomes the ID of the block
        let addr = self.chiplets.hash_span_block(op_batches, block.hash());
        let num_op_groups = get_span_op_group_count(op_batches);
        self.decoder.start_span(&op_batches[0], Felt::new(num_op_groups as u64), addr);
        self.execute_op(Operation::Noop)
    }

    /// Continues decoding a SPAN block with the next operation batch.
    pub(super) fn respan(&mut self, op_batch: &OpBatch) {
        self.decoder.respan(op_batch);
    }

    /// Ends decoding of a SPAN block.
    pub(super) fn end_span_block(&mut self, block: &Span) -> Result<(), ExecutionError> {
        self.decoder.end_span(block.hash().into());
        self.execute_op(Operation::Noop)
    }
}
/// Program decoder component of the VM.
///
/// Tracks the stack of blocks currently being executed and builds the decoder portion of the
/// execution trace. In debug mode it also records every executed operation and assembly-op info.
pub struct Decoder {
    // stack of blocks which are currently being executed
    block_stack: BlockStack,
    // state of the SPAN block currently being decoded; None when not inside a span
    span_context: Option<SpanContext>,
    // builder for the decoder's main trace columns
    trace: DecoderTrace,
    // operations and assembly-op info; populated only when running in debug mode
    debug_info: DebugInfo,
}
impl Decoder {
    // CONSTRUCTOR
    // --------------------------------------------------------------------------------------------

    /// Returns an empty instance of the decoder; `in_debug_mode` controls whether executed
    /// operations and assembly-op info are recorded.
    pub fn new(in_debug_mode: bool) -> Self {
        Self {
            block_stack: BlockStack::default(),
            span_context: None,
            trace: DecoderTrace::new(),
            debug_info: DebugInfo::new(in_debug_mode),
        }
    }

    // PUBLIC ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns the current length of the decoder's execution trace.
    pub fn trace_len(&self) -> usize {
        self.trace.trace_len()
    }

    /// Returns the hash of the program as recorded in the decoder trace.
    pub fn program_hash(&self) -> [Felt; DIGEST_LEN] {
        self.trace.program_hash()
    }

    /// Returns the collected debug info; meaningful only when the decoder runs in debug mode.
    pub fn debug_info(&self) -> &DebugInfo {
        debug_assert!(self.in_debug_mode());
        &self.debug_info
    }

    /// Returns true if the decoder was instantiated in debug mode.
    pub fn in_debug_mode(&self) -> bool {
        self.debug_info.in_debug_mode()
    }

    // CONTROL BLOCKS
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a JOIN block with the specified child hashes and block address.
    pub fn start_join(&mut self, child1_hash: Word, child2_hash: Word, addr: Felt) {
        let parent_addr = self.block_stack.push(addr, BlockType::Join(false), None);
        self.trace
            .append_block_start(parent_addr, Operation::Join, child1_hash, child2_hash);
        self.debug_info.append_operation(Operation::Join);
    }

    /// Starts decoding of a SPLIT block with the specified branch hashes and block address.
    pub fn start_split(&mut self, child1_hash: Word, child2_hash: Word, addr: Felt) {
        let parent_addr = self.block_stack.push(addr, BlockType::Split, None);
        self.trace
            .append_block_start(parent_addr, Operation::Split, child1_hash, child2_hash);
        self.debug_info.append_operation(Operation::Split);
    }

    /// Starts decoding of a LOOP block; `stack_top` determines whether the loop body will be
    /// entered (it is entered when the value is ONE).
    pub fn start_loop(&mut self, loop_body_hash: Word, addr: Felt, stack_top: Felt) {
        let enter_loop = stack_top == ONE;
        let parent_addr = self.block_stack.push(addr, BlockType::Loop(enter_loop), None);
        self.trace
            .append_block_start(parent_addr, Operation::Loop, loop_body_hash, EMPTY_WORD);
        self.debug_info.append_operation(Operation::Loop);
    }

    /// Records the start of another iteration of the loop at the top of the block stack.
    pub fn repeat(&mut self) {
        let block_info = self.block_stack.peek();
        // a REPEAT is only valid inside a loop whose body was entered
        debug_assert_eq!(ONE, block_info.is_entered_loop());
        self.trace.append_loop_repeat(block_info.addr);
        self.debug_info.append_operation(Operation::Repeat);
    }

    /// Starts decoding of a CALL block; `ctx_info` captures the caller's execution context so it
    /// can be restored when the block ends.
    pub fn start_call(&mut self, fn_hash: Word, addr: Felt, ctx_info: ExecutionContextInfo) {
        let parent_addr = self.block_stack.push(addr, BlockType::Call, Some(ctx_info));
        self.trace.append_block_start(parent_addr, Operation::Call, fn_hash, EMPTY_WORD);
        self.debug_info.append_operation(Operation::Call);
    }

    /// Starts decoding of a SYSCALL block; `ctx_info` captures the caller's execution context so
    /// it can be restored when the block ends.
    pub fn start_syscall(&mut self, fn_hash: Word, addr: Felt, ctx_info: ExecutionContextInfo) {
        let parent_addr = self.block_stack.push(addr, BlockType::SysCall, Some(ctx_info));
        self.trace
            .append_block_start(parent_addr, Operation::SysCall, fn_hash, EMPTY_WORD);
        self.debug_info.append_operation(Operation::SysCall);
    }

    /// Starts decoding of a DYN block with the runtime-provided callee hash.
    pub fn start_dyn(&mut self, dyn_hash: Word, addr: Felt) {
        let parent_addr = self.block_stack.push(addr, BlockType::Dyn, None);
        // use EMPTY_WORD (== [ZERO; 4]) for consistency with the other block starters above
        self.trace.append_block_start(parent_addr, Operation::Dyn, dyn_hash, EMPTY_WORD);
        self.debug_info.append_operation(Operation::Dyn);
    }

    /// Ends decoding of the control block at the top of the block stack and returns the saved
    /// execution context, if any (present only for CALL/SYSCALL blocks).
    pub fn end_control_block(&mut self, block_hash: Word) -> Option<ExecutionContextInfo> {
        let block_info = self.block_stack.pop();
        self.trace.append_block_end(
            block_info.addr,
            block_hash,
            block_info.is_loop_body(),
            block_info.is_entered_loop(),
            block_info.is_call(),
            block_info.is_syscall(),
        );
        self.debug_info.append_operation(Operation::End);
        block_info.ctx_info
    }

    // SPAN BLOCK
    // --------------------------------------------------------------------------------------------

    /// Starts decoding of a SPAN block, initializing the span context from the first op batch.
    pub fn start_span(&mut self, first_op_batch: &OpBatch, num_op_groups: Felt, addr: Felt) {
        debug_assert!(self.span_context.is_none(), "already in span");
        let parent_addr = self.block_stack.push(addr, BlockType::Span, None);
        self.trace
            .append_span_start(parent_addr, first_op_batch.groups(), num_op_groups);
        // the first group of the batch is consumed immediately, so one fewer group remains
        self.span_context = Some(SpanContext {
            num_groups_left: num_op_groups - ONE,
            group_ops_left: first_op_batch.groups()[0],
        });
        self.debug_info.append_operation(Operation::Span);
    }

    /// Continues decoding of the current SPAN block with the next operation batch; advances the
    /// block's address by one hash cycle and resets the span context to the new batch.
    pub fn respan(&mut self, op_batch: &OpBatch) {
        self.trace.append_respan(op_batch.groups());
        let block_info = self.block_stack.peek_mut();
        block_info.addr += HASH_CYCLE_LEN;
        let ctx = self.span_context.as_mut().expect("not in span");
        ctx.num_groups_left -= ONE;
        ctx.group_ops_left = op_batch.groups()[0];
        self.debug_info.append_operation(Operation::Respan);
    }

    /// Starts decoding a new operation group within the current SPAN block.
    pub fn start_op_group(&mut self, op_group: Felt) {
        let ctx = self.span_context.as_mut().expect("not in span");
        debug_assert_eq!(ZERO, ctx.group_ops_left, "not all ops executed in current group");
        ctx.group_ops_left = op_group;
        ctx.num_groups_left -= ONE;
    }

    /// Records execution of a user operation at position `op_idx` within its op group.
    pub fn execute_user_op(&mut self, op: Operation, op_idx: usize) {
        let block = self.block_stack.peek();
        let ctx = self.span_context.as_mut().expect("not in span");
        // shift the executed opcode out of the remaining-ops accumulator
        ctx.group_ops_left = remove_opcode_from_group(ctx.group_ops_left, op);
        self.trace.append_user_op(
            op,
            block.addr,
            block.parent_addr,
            ctx.num_groups_left,
            ctx.group_ops_left,
            Felt::from(op_idx as u32),
        );
        // an operation with an immediate value consumes an extra op group
        if op.imm_value().is_some() {
            ctx.num_groups_left -= ONE;
        }
        self.debug_info.append_operation(op);
    }

    /// Sets the helper registers in the decoder trace for the last executed user operation.
    pub fn set_user_op_helpers(&mut self, op: Operation, values: &[Felt]) {
        debug_assert!(!op.is_control_op(), "op is a control operation");
        self.trace.set_user_op_helpers(values);
    }

    /// Ends decoding of the current SPAN block.
    pub fn end_span(&mut self, block_hash: Word) {
        let block_info = self.block_stack.pop();
        self.trace.append_span_end(block_hash, block_info.is_loop_body());
        self.span_context = None;
        self.debug_info.append_operation(Operation::End);
    }

    // TRACE GENERATION
    // --------------------------------------------------------------------------------------------

    /// Consumes the decoder and returns its trace, padded/extended to `trace_len` rows with
    /// `num_rand_rows` rows reserved for randomness.
    pub fn into_trace(self, trace_len: usize, num_rand_rows: usize) -> super::DecoderTrace {
        let trace = self
            .trace
            .into_vec(trace_len, num_rand_rows)
            .try_into()
            .expect("failed to convert vector to array");
        let aux_builder = AuxTraceBuilder::default();
        super::DecoderTrace { trace, aux_builder }
    }

    // DEBUG / TEST HELPERS
    // --------------------------------------------------------------------------------------------

    /// Records assembly-op info for the specified clock cycle.
    pub fn append_asmop(&mut self, clk: u32, asmop: AssemblyOp) {
        self.debug_info.append_asmop(clk, asmop);
    }

    /// Appends a dummy row to the decoder trace (test-only).
    #[cfg(test)]
    pub fn add_dummy_trace_row(&mut self) {
        self.trace.add_dummy_row();
    }

    /// Returns the current user-op helper register values (test-only).
    #[cfg(test)]
    pub fn get_user_op_helpers(&self) -> [Felt; NUM_USER_OP_HELPERS] {
        self.trace.get_user_op_helpers()
    }
}
impl Default for Decoder {
fn default() -> Self {
Self::new(false)
}
}
/// State needed to decode the SPAN block currently being executed.
#[derive(Default)]
struct SpanContext {
    // remaining, not-yet-executed portion of the current op group; each executed opcode is
    // shifted out of the low bits (see `remove_opcode_from_group`)
    group_ops_left: Felt,
    // number of op groups left to be executed in the span; decremented on RESPAN, on starting a
    // new op group, and for each operation carrying an immediate value
    num_groups_left: Felt,
}
/// Removes the next opcode from the op group and returns the remaining op group.
///
/// The opcode occupies the least-significant `NUM_OP_BITS` bits of the group, so removal is a
/// subtraction followed by a right shift.
fn remove_opcode_from_group(op_group: Felt, op: Operation) -> Felt {
    let opcode = op.op_code() as u64;
    let group = op_group.as_int();
    // Check for underflow BEFORE subtracting: in debug builds the u64 subtraction itself would
    // panic on underflow before a post-hoc assertion could ever run, and in release builds the
    // wrapped value would go undetected entirely (the original assert compared against the
    // already-computed result and so could never fire usefully).
    debug_assert!(group >= opcode, "op group underflow");
    Felt::new((group - opcode) >> NUM_OP_BITS)
}
/// Returns the number of op groups to be processed in the next batch: the number of groups
/// still remaining, capped at the batch size.
fn get_num_groups_in_next_batch(num_groups_left: Felt) -> usize {
    (num_groups_left.as_int() as usize).min(OP_BATCH_SIZE)
}
/// Builds an op-group field element from the given operations by packing each opcode into
/// consecutive `OP_BITS`-wide slots, least-significant first (test-only).
///
/// # Panics
/// Panics if more than `OP_GROUP_SIZE` operations are provided.
#[cfg(test)]
pub fn build_op_group(ops: &[Operation]) -> Felt {
    // validate up front so we fail before doing any work (the original counted ops in the loop
    // and asserted only afterwards)
    assert!(ops.len() <= super::OP_GROUP_SIZE, "too many ops");
    let mut group = 0u64;
    // idiomatic enumerate() instead of a manually-maintained index counter
    for (i, op) in ops.iter().enumerate() {
        group |= (op.op_code() as u64) << (Operation::OP_BITS * i);
    }
    Felt::new(group)
}
/// Debug-related information collected by the decoder while a program executes.
pub struct DebugInfo {
    // when false, `append_operation` discards operations instead of recording them
    in_debug_mode: bool,
    // operations recorded via `append_operation` (populated only in debug mode)
    operations: Vec<Operation>,
    // assembly-op info paired with the clock cycle at which each was recorded
    assembly_ops: Vec<(usize, AssemblyOp)>,
}
impl DebugInfo {
    /// Returns an empty [DebugInfo] instance; when `in_debug_mode` is false, appended
    /// operations are discarded.
    pub fn new(in_debug_mode: bool) -> Self {
        Self {
            in_debug_mode,
            // element types are inferred from the struct fields; the turbofish annotations in
            // the original were unnecessary
            operations: Vec::new(),
            assembly_ops: Vec::new(),
        }
    }

    /// Returns true if debug mode is on.
    #[inline(always)]
    pub fn in_debug_mode(&self) -> bool {
        self.in_debug_mode
    }

    /// Returns the operations recorded so far (empty unless in debug mode).
    pub fn operations(&self) -> &[Operation] {
        &self.operations
    }

    /// Returns the recorded (clock cycle, assembly op) pairs.
    pub fn assembly_ops(&self) -> &[(usize, AssemblyOp)] {
        &self.assembly_ops
    }

    /// Records an executed operation; a no-op unless debug mode is on.
    #[inline(always)]
    pub fn append_operation(&mut self, op: Operation) {
        if self.in_debug_mode {
            self.operations.push(op);
        }
    }

    /// Records assembly-op info for the specified clock cycle.
    ///
    /// NOTE: unlike `append_operation`, this records unconditionally (not gated on debug mode),
    /// matching the original behavior.
    pub fn append_asmop(&mut self, clk: u32, asmop: AssemblyOp) {
        self.assembly_ops.push((clk as usize, asmop));
    }
}