use alloc::{sync::Arc, vec::Vec};

use miden_air::trace::chiplets::hasher::{HASH_CYCLE_LEN, HASH_CYCLE_LEN_FELT, STATE_WIDTH};

use super::{
    decoder::block_stack::{BlockInfo, BlockStack, BlockType, ExecutionContextInfo},
    stack::OverflowTable,
    trace_state::{
        AceReplay, AdviceReplay, BitwiseReplay, BlockAddressReplay, BlockStackReplay,
        CoreTraceFragmentContext, CoreTraceState, DecoderState, ExecutionContextReplay,
        ExecutionContextSystemInfo, ExecutionReplay, HasherRequestReplay, HasherResponseReplay,
        KernelReplay, MastForestResolutionReplay, MemoryReadsReplay, MemoryWritesReplay, NodeFlags,
        RangeCheckerReplay, StackOverflowReplay, StackState, SystemState,
    },
    utils::split_u32_into_u16,
};
use crate::{
    ContextId, EMPTY_WORD, FastProcessor, Felt, MIN_STACK_DEPTH, ONE, RowIndex, Word, ZERO,
    continuation_stack::{Continuation, ContinuationStack},
    crypto::merkle::MerklePath,
    mast::{
        BasicBlockNode, JoinNode, LoopNode, MastForest, MastNode, MastNodeExt, MastNodeId,
        SplitNode,
    },
    precompile::PrecompileTranscript,
    processor::{Processor, StackInterface, SystemInterface},
    trace::chiplets::CircuitEvaluation,
    tracer::{OperationHelperRegisters, Tracer},
};

/// A snapshot of the core trace state at the start of a trace fragment, together with the
/// continuation stack and MAST forest needed to resume execution from that point.
#[derive(Debug)]
struct StateSnapshot {
    state: CoreTraceState,
    continuation_stack: ContinuationStack,
    initial_mast_forest: Arc<MastForest>,
}

/// All the information recorded during execution that is needed to generate the execution trace.
pub struct TraceGenerationContext {
    /// One context per trace fragment; each contains the state snapshot and replays needed to
    /// rebuild that fragment of the core trace independently.
    pub core_trace_contexts: Vec<CoreTraceFragmentContext>,

    /// Replays consumed by the individual chiplets when building their trace columns.
    pub range_checker_replay: RangeCheckerReplay,
    pub memory_writes: MemoryWritesReplay,
    pub bitwise_replay: BitwiseReplay,
    pub hasher_for_chiplet: HasherRequestReplay,
    pub kernel_replay: KernelReplay,
    pub ace_replay: AceReplay,

    /// The precompile transcript at the end of execution.
    pub final_pc_transcript: PrecompileTranscript,

    /// The number of clock cycles covered by each trace fragment.
    pub fragment_size: usize,
}

/// A [`Tracer`] implementation that records everything needed to generate the execution trace
/// after the fact, split into fragments that can be built in parallel.
///
/// At every fragment boundary the tracer snapshots the processor state; during the fragment it
/// records replays of all nondeterministic data consumed (hasher responses, memory reads, advice
/// pops, MAST forest resolutions, etc.) so that each fragment can later be re-executed
/// independently.
#[derive(Debug)]
pub struct ExecutionTracer {
    /// Snapshot taken at the start of the fragment currently being recorded, if any.
    state_snapshot: Option<StateSnapshot>,

    /// Mirror of the processor's stack overflow table, used to compute stack depths.
    overflow_table: OverflowTable,
    overflow_replay: StackOverflowReplay,

    /// Mirror of the decoder's block stack, used to record block addresses and node flags.
    block_stack: BlockStack,
    block_stack_replay: BlockStackReplay,
    execution_context_replay: ExecutionContextReplay,

    /// Replays scoped to the current fragment; extracted when the fragment is finished.
    hasher_chiplet_shim: HasherChipletShim,
    memory_reads: MemoryReadsReplay,
    advice: AdviceReplay,
    external: MastForestResolutionReplay,

    /// Replays that span the whole execution and feed the chiplet traces.
    range_checker: RangeCheckerReplay,
    memory_writes: MemoryWritesReplay,
    bitwise: BitwiseReplay,
    kernel: KernelReplay,
    hasher_for_chiplet: HasherRequestReplay,
    ace: AceReplay,

    /// Completed fragment contexts, in execution order.
    fragment_contexts: Vec<CoreTraceFragmentContext>,

    /// The number of clock cycles covered by each trace fragment.
    fragment_size: usize,
}

impl ExecutionTracer {
    /// Creates a new `ExecutionTracer` that starts a fresh fragment context every
    /// `fragment_size` clock cycles.
    pub fn new(fragment_size: usize) -> Self {
        Self {
            state_snapshot: None,
            overflow_table: OverflowTable::default(),
            overflow_replay: StackOverflowReplay::default(),
            block_stack: BlockStack::default(),
            block_stack_replay: BlockStackReplay::default(),
            execution_context_replay: ExecutionContextReplay::default(),
            hasher_chiplet_shim: HasherChipletShim::default(),
            memory_reads: MemoryReadsReplay::default(),
            range_checker: RangeCheckerReplay::default(),
            memory_writes: MemoryWritesReplay::default(),
            advice: AdviceReplay::default(),
            bitwise: BitwiseReplay::default(),
            kernel: KernelReplay::default(),
            hasher_for_chiplet: HasherRequestReplay::default(),
            ace: AceReplay::default(),
            external: MastForestResolutionReplay::default(),
            fragment_contexts: Vec::new(),
            fragment_size,
        }
    }

    /// Consumes the tracer and returns the [`TraceGenerationContext`] from which the execution
    /// trace can be built.
    pub fn into_trace_generation_context(
        mut self,
        final_pc_transcript: PrecompileTranscript,
    ) -> TraceGenerationContext {
        // Finish the fragment that was being recorded when execution ended.
        self.finish_current_fragment_context();

        TraceGenerationContext {
            core_trace_contexts: self.fragment_contexts,
            range_checker_replay: self.range_checker,
            memory_writes: self.memory_writes,
            bitwise_replay: self.bitwise,
            kernel_replay: self.kernel,
            hasher_for_chiplet: self.hasher_for_chiplet,
            ace_replay: self.ace,
            final_pc_transcript,
            fragment_size: self.fragment_size,
        }
    }

    /// Finishes the fragment context currently being recorded (if any) and takes a snapshot of
    /// the processor state from which the next fragment will resume.
    ///
    /// The provided `continuation` is pushed back onto the continuation stack so that the new
    /// fragment re-executes it as its first step.
    fn start_new_fragment_context(
        &mut self,
        system_state: SystemState,
        stack_top: [Felt; MIN_STACK_DEPTH],
        mut continuation_stack: ContinuationStack,
        continuation: Continuation,
        current_forest: Arc<MastForest>,
    ) {
        self.finish_current_fragment_context();

        self.state_snapshot = {
            let decoder_state = {
                if self.block_stack.is_empty() {
                    DecoderState { current_addr: ZERO, parent_addr: ZERO }
                } else {
                    let block_info = self.block_stack.peek();

                    DecoderState {
                        current_addr: block_info.addr,
                        parent_addr: block_info.parent_addr,
                    }
                }
            };
            let stack = {
                let stack_depth =
                    MIN_STACK_DEPTH + self.overflow_table.num_elements_in_current_ctx();
                let last_overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
                StackState::new(stack_top, stack_depth, last_overflow_addr)
            };

            // Make the snapshotted continuation stack start with the current continuation.
            continuation_stack.push_continuation(continuation);

            Some(StateSnapshot {
                state: CoreTraceState {
                    system: system_state,
                    decoder: decoder_state,
                    stack,
                },
                continuation_stack,
                initial_mast_forest: current_forest,
            })
        };
    }

    /// Records the start of a control node: the hash request that the decoder makes to the
    /// hasher chiplet, and a new entry on the block stack.
    fn record_control_node_start<P: Processor>(
        &mut self,
        node: &MastNode,
        processor: &P,
        current_forest: &MastForest,
    ) {
        let (ctx_info, block_type) = match node {
            MastNode::Join(node) => {
                let child1_hash = current_forest
                    .get_node_by_id(node.first())
                    .expect("join node's first child expected to be in the forest")
                    .digest();
                let child2_hash = current_forest
                    .get_node_by_id(node.second())
                    .expect("join node's second child expected to be in the forest")
                    .digest();
                self.hasher_for_chiplet.record_hash_control_block(
                    child1_hash,
                    child2_hash,
                    JoinNode::DOMAIN,
                    node.digest(),
                );

                (None, BlockType::Join(false))
            },
            MastNode::Split(node) => {
                let child1_hash = current_forest
                    .get_node_by_id(node.on_true())
                    .expect("split node's true child expected to be in the forest")
                    .digest();
                let child2_hash = current_forest
                    .get_node_by_id(node.on_false())
                    .expect("split node's false child expected to be in the forest")
                    .digest();
                self.hasher_for_chiplet.record_hash_control_block(
                    child1_hash,
                    child2_hash,
                    SplitNode::DOMAIN,
                    node.digest(),
                );

                (None, BlockType::Split)
            },
            MastNode::Loop(node) => {
                let body_hash = current_forest
                    .get_node_by_id(node.body())
                    .expect("loop node's body expected to be in the forest")
                    .digest();

                self.hasher_for_chiplet.record_hash_control_block(
                    body_hash,
                    EMPTY_WORD,
                    LoopNode::DOMAIN,
                    node.digest(),
                );

                let loop_entered = {
                    let condition = processor.stack().get(0);
                    condition == ONE
                };

                (None, BlockType::Loop(loop_entered))
            },
            MastNode::Call(node) => {
                let callee_hash = current_forest
                    .get_node_by_id(node.callee())
                    .expect("call node's callee expected to be in the forest")
                    .digest();

                self.hasher_for_chiplet.record_hash_control_block(
                    callee_hash,
                    EMPTY_WORD,
                    node.domain(),
                    node.digest(),
                );

                let exec_ctx = {
                    let overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
                    ExecutionContextInfo::new(
                        processor.system().ctx(),
                        processor.system().caller_hash(),
                        processor.stack().depth(),
                        overflow_addr,
                    )
                };
                let block_type = if node.is_syscall() {
                    BlockType::SysCall
                } else {
                    BlockType::Call
                };

                (Some(exec_ctx), block_type)
            },
            MastNode::Dyn(dyn_node) => {
                self.hasher_for_chiplet.record_hash_control_block(
                    EMPTY_WORD,
                    EMPTY_WORD,
                    dyn_node.domain(),
                    dyn_node.digest(),
                );

                if dyn_node.is_dyncall() {
                    let exec_ctx = {
                        let overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
                        // DYNCALL drops the element holding the memory address of the callee
                        // hash before the new context starts, so record the depth after that
                        // drop.
                        let stack_depth_after_drop = processor.stack().depth() - 1;
                        ExecutionContextInfo::new(
                            processor.system().ctx(),
                            processor.system().caller_hash(),
                            stack_depth_after_drop,
                            overflow_addr,
                        )
                    };
                    (Some(exec_ctx), BlockType::Dyncall)
                } else {
                    (None, BlockType::Dyn)
                }
            },
            MastNode::Block(_) => panic!(
                "`ExecutionTracer::record_basic_block_start()` must be called instead for basic blocks"
            ),
            MastNode::External(_) => panic!(
                "external nodes are guaranteed to be resolved before `record_control_node_start` is called"
            ),
        };

        let block_addr = self.hasher_chiplet_shim.record_hash_control_block();
        let parent_addr = self.block_stack.push(block_addr, block_type, ctx_info);
        self.block_stack_replay.record_node_start_parent_addr(parent_addr);
    }

    /// Records the information needed to replay the end of a node: its decoder flags, and the
    /// block and parent addresses that become current once the node is popped off the block
    /// stack.
    fn record_node_end(&mut self, block_info: &BlockInfo) {
        let flags = NodeFlags::new(
            block_info.is_loop_body() == ONE,
            block_info.is_entered_loop() == ONE,
            block_info.is_call() == ONE,
            block_info.is_syscall() == ONE,
        );
        let (prev_addr, prev_parent_addr) = if self.block_stack.is_empty() {
            (ZERO, ZERO)
        } else {
            let prev_block = self.block_stack.peek();
            (prev_block.addr, prev_block.parent_addr)
        };
        self.block_stack_replay.record_node_end(
            block_info.addr,
            flags,
            prev_addr,
            prev_parent_addr,
        );
    }

    /// Records the execution context to restore when a CALL, SYSCALL or DYNCALL returns.
    fn record_execution_context(&mut self, ctx_info: ExecutionContextSystemInfo) {
        self.execution_context_replay.record_execution_context(ctx_info);
    }

    /// If a fragment context is currently being recorded, extracts all of its per-fragment
    /// replays and pushes the completed [`CoreTraceFragmentContext`] onto the list of fragment
    /// contexts.
    fn finish_current_fragment_context(&mut self) {
        if let Some(snapshot) = self.state_snapshot.take() {
            let (hasher_replay, block_addr_replay) = self.hasher_chiplet_shim.extract_replay();
            let memory_reads_replay = core::mem::take(&mut self.memory_reads);
            let advice_replay = core::mem::take(&mut self.advice);
            let external_replay = core::mem::take(&mut self.external);
            let stack_overflow_replay = core::mem::take(&mut self.overflow_replay);
            let block_stack_replay = core::mem::take(&mut self.block_stack_replay);
            let execution_context_replay = core::mem::take(&mut self.execution_context_replay);

            let trace_state = CoreTraceFragmentContext {
                state: snapshot.state,
                replay: ExecutionReplay {
                    hasher: hasher_replay,
                    block_address: block_addr_replay,
                    memory_reads: memory_reads_replay,
                    advice: advice_replay,
                    mast_forest_resolution: external_replay,
                    stack_overflow: stack_overflow_replay,
                    block_stack: block_stack_replay,
                    execution_context: execution_context_replay,
                },
                continuation: snapshot.continuation_stack,
                initial_mast_forest: snapshot.initial_mast_forest,
            };

            self.fragment_contexts.push(trace_state);
        }
    }
}

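// A minimal test-only sketch (not part of the tracer) of the stack-depth bookkeeping used when
// snapshotting stack state above: the full depth is the 16 visible slots (`MIN_STACK_DEPTH`)
// plus however many elements the current context has spilled into the overflow table. The
// `Vec`-based table below is a hypothetical stand-in for `OverflowTable`.
#[cfg(test)]
mod stack_depth_sketch {
    use alloc::vec::Vec;

    const MIN_STACK_DEPTH: usize = 16; // assumed to match crate::MIN_STACK_DEPTH

    #[test]
    fn depth_tracks_overflow() {
        // Each entry records (value, clk-of-update), mirroring how pushes are recorded.
        let mut overflow: Vec<(u64, u64)> = Vec::new();
        assert_eq!(MIN_STACK_DEPTH + overflow.len(), 16);

        overflow.push((42, 7)); // increment_stack_size at clk 7
        overflow.push((43, 9)); // increment_stack_size at clk 9
        assert_eq!(MIN_STACK_DEPTH + overflow.len(), 18);

        // Popping restores the previous last-update clock, as replayed by
        // `record_pop_overflow`.
        let (popped, _clk) = overflow.pop().unwrap();
        assert_eq!(popped, 43);
        assert_eq!(overflow.last().map(|&(_, clk)| clk), Some(7));
    }
}
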
impl Tracer for ExecutionTracer {
    type Processor = FastProcessor;

    fn start_clock_cycle(
        &mut self,
        processor: &FastProcessor,
        continuation: Continuation,
        continuation_stack: &ContinuationStack,
        current_forest: &Arc<MastForest>,
    ) {
        // Start a new fragment context at every fragment boundary.
        if processor.system().clock().as_usize().is_multiple_of(self.fragment_size) {
            self.start_new_fragment_context(
                SystemState::from_processor(processor),
                processor
                    .stack_top()
                    .try_into()
                    .expect("stack_top expected to be MIN_STACK_DEPTH elements"),
                continuation_stack.clone(),
                continuation.clone(),
                current_forest.clone(),
            );
        }

        match continuation {
            Continuation::ResumeBasicBlock { .. } => {
                // Resuming in the middle of a basic block requires no extra bookkeeping.
            },
            Continuation::StartNode(mast_node_id) => match &current_forest[mast_node_id] {
                MastNode::Join(_)
                | MastNode::Split(_)
                | MastNode::Loop(_)
                | MastNode::Dyn(_)
                | MastNode::Call(_) => {
                    self.record_control_node_start(
                        &current_forest[mast_node_id],
                        processor,
                        current_forest,
                    );
                },
                MastNode::Block(basic_block_node) => {
                    self.hasher_for_chiplet.record_hash_basic_block(
                        current_forest.clone(),
                        mast_node_id,
                        basic_block_node.digest(),
                    );
                    let block_addr =
                        self.hasher_chiplet_shim.record_hash_basic_block(basic_block_node);
                    let parent_addr =
                        self.block_stack.push(block_addr, BlockType::BasicBlock, None);
                    self.block_stack_replay.record_node_start_parent_addr(parent_addr);
                },
                MastNode::External(_) => unreachable!(
                    "start_clock_cycle is guaranteed not to be called on external nodes"
                ),
            },
            Continuation::Respan { node_id: _, batch_index: _ } => {
                // A RESPAN advances the decoder to the next op batch, one hash cycle further.
                self.block_stack.peek_mut().addr += HASH_CYCLE_LEN_FELT;
            },
            Continuation::FinishLoop { node_id: _, was_entered }
                if was_entered && processor.stack_get(0) == ONE =>
            {
                // The loop condition is still ONE, so the loop body repeats and the block stays
                // on the block stack.
            },
            Continuation::FinishJoin(_)
            | Continuation::FinishSplit(_)
            | Continuation::FinishCall(_)
            | Continuation::FinishDyn(_)
            | Continuation::FinishLoop { .. }
            | Continuation::FinishBasicBlock(_) => {
                // The node is done: pop it off the block stack and record its end.
                let block_info = self.block_stack.pop();
                self.record_node_end(&block_info);

                if let Some(ctx_info) = block_info.ctx_info {
                    self.record_execution_context(ExecutionContextSystemInfo {
                        parent_ctx: ctx_info.parent_ctx,
                        parent_fn_hash: ctx_info.parent_fn_hash,
                    });
                }
            },
            Continuation::FinishExternal(_)
            | Continuation::EnterForest(_)
            | Continuation::AfterExitDecorators(_)
            | Continuation::AfterExitDecoratorsBasicBlock(_) => {
                panic!(
                    "FinishExternal, EnterForest, AfterExitDecorators and AfterExitDecoratorsBasicBlock continuations are guaranteed not to be passed here"
                )
            },
        }
    }

    fn record_mast_forest_resolution(&mut self, node_id: MastNodeId, forest: &Arc<MastForest>) {
        self.external.record_resolution(node_id, forest.clone());
    }

    fn record_hasher_permute(
        &mut self,
        input_state: [Felt; STATE_WIDTH],
        output_state: [Felt; STATE_WIDTH],
    ) {
        self.hasher_for_chiplet.record_permute_input(input_state);
        self.hasher_chiplet_shim.record_permute_output(output_state);
    }

    fn record_hasher_build_merkle_root(
        &mut self,
        node: Word,
        path: Option<&MerklePath>,
        index: Felt,
        output_root: Word,
    ) {
        let path = path.expect("execution tracer expects a valid Merkle path");
        self.hasher_chiplet_shim.record_build_merkle_root(path, output_root);
        self.hasher_for_chiplet.record_build_merkle_root(node, path.clone(), index);
    }

    fn record_hasher_update_merkle_root(
        &mut self,
        old_value: Word,
        new_value: Word,
        path: Option<&MerklePath>,
        index: Felt,
        old_root: Word,
        new_root: Word,
    ) {
        let path = path.expect("execution tracer expects a valid Merkle path");
        self.hasher_chiplet_shim.record_update_merkle_root(path, old_root, new_root);
        self.hasher_for_chiplet.record_update_merkle_root(
            old_value,
            new_value,
            path.clone(),
            index,
        );
    }

    fn record_memory_read_element(
        &mut self,
        element: Felt,
        addr: Felt,
        ctx: ContextId,
        clk: RowIndex,
    ) {
        self.memory_reads.record_read_element(element, addr, ctx, clk);
    }

    fn record_memory_read_word(&mut self, word: Word, addr: Felt, ctx: ContextId, clk: RowIndex) {
        self.memory_reads.record_read_word(word, addr, ctx, clk);
    }

    fn record_memory_write_element(
        &mut self,
        element: Felt,
        addr: Felt,
        ctx: ContextId,
        clk: RowIndex,
    ) {
        self.memory_writes.record_write_element(element, addr, ctx, clk);
    }

    fn record_memory_write_word(&mut self, word: Word, addr: Felt, ctx: ContextId, clk: RowIndex) {
        self.memory_writes.record_write_word(word, addr, ctx, clk);
    }

    fn record_advice_pop_stack(&mut self, value: Felt) {
        self.advice.record_pop_stack(value);
    }

    fn record_advice_pop_stack_word(&mut self, word: Word) {
        self.advice.record_pop_stack_word(word);
    }

    fn record_advice_pop_stack_dword(&mut self, words: [Word; 2]) {
        self.advice.record_pop_stack_dword(words);
    }

    fn record_u32and(&mut self, a: Felt, b: Felt) {
        self.bitwise.record_u32and(a, b);
    }

    fn record_u32xor(&mut self, a: Felt, b: Felt) {
        self.bitwise.record_u32xor(a, b);
    }

    fn record_u32_range_checks(&mut self, clk: RowIndex, u32_lo: Felt, u32_hi: Felt) {
        // Split each u32 into two u16 limbs; the range checker receives the limbs low-first.
        let (t1, t0) = split_u32_into_u16(u32_lo.as_canonical_u64());
        let (t3, t2) = split_u32_into_u16(u32_hi.as_canonical_u64());

        self.range_checker.record_range_check_u32(clk, [t0, t1, t2, t3]);
    }

    fn record_kernel_proc_access(&mut self, proc_hash: Word) {
        self.kernel.record_kernel_proc_access(proc_hash);
    }

    fn record_circuit_evaluation(&mut self, circuit_evaluation: CircuitEvaluation) {
        self.ace.record_circuit_evaluation(circuit_evaluation);
    }

    fn finalize_clock_cycle(
        &mut self,
        _processor: &FastProcessor,
        _op_helper_registers: OperationHelperRegisters,
        _current_forest: &Arc<MastForest>,
    ) {
        // Nothing to record at the end of a clock cycle.
    }

    fn increment_stack_size(&mut self, processor: &FastProcessor) {
        let new_overflow_value = processor.stack_get(16);
        self.overflow_table.push(new_overflow_value, processor.system().clock());
    }

    fn decrement_stack_size(&mut self) {
        // If the overflow table is empty in the current context, there is nothing to replay.
        if let Some(popped_value) = self.overflow_table.pop() {
            let new_overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
            self.overflow_replay.record_pop_overflow(popped_value, new_overflow_addr);
        }
    }

    fn start_context(&mut self) {
        self.overflow_table.start_context();
    }

    fn restore_context(&mut self) {
        self.overflow_table.restore_context();
        self.overflow_replay.record_restore_context_overflow_addr(
            MIN_STACK_DEPTH + self.overflow_table.num_elements_in_current_ctx(),
            self.overflow_table.last_update_clk_in_current_ctx(),
        );
    }
}

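// A minimal test-only sketch of the limb decomposition performed by `record_u32_range_checks`
// above: each u32 is split into two u16 limbs and the limbs are recorded low-first, i.e. as
// [lo(u32_lo), hi(u32_lo), lo(u32_hi), hi(u32_hi)]. The `split` helper is a hypothetical
// stand-in for `split_u32_into_u16`, mirroring how `(t1, t0)` destructures its result.
#[cfg(test)]
mod u16_limb_sketch {
    /// Splits the low 32 bits of `value` into (hi, lo) u16 limbs.
    fn split(value: u64) -> (u16, u16) {
        ((value >> 16) as u16, value as u16)
    }

    #[test]
    fn limbs_recombine_into_the_original_value() {
        let (hi, lo) = split(0x1234_5678);
        assert_eq!((hi, lo), (0x1234, 0x5678));
        assert_eq!(((hi as u32) << 16) | lo as u32, 0x1234_5678);
    }
}
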
/// Number of rows a single hash permutation occupies in the hasher chiplet's trace.
const NUM_HASHER_ROWS_PER_PERMUTATION: u32 = HASH_CYCLE_LEN as u32;

/// Mirrors the hasher chiplet's address counter so that the tracer can record the block
/// addresses and hasher responses the chiplet will produce, without building the hasher trace
/// itself.
#[derive(Debug)]
pub struct HasherChipletShim {
    /// Address of the first row of the next hasher operation; the chiplet's addresses start
    /// at 1.
    addr: u32,
    /// Hasher responses recorded for the current trace fragment.
    hasher_replay: HasherResponseReplay,
    /// Block addresses recorded for the current trace fragment.
    block_addr_replay: BlockAddressReplay,
}

impl HasherChipletShim {
    /// Creates a new shim with the address counter set to the hasher chiplet's first row.
    pub fn new() -> Self {
        Self {
            addr: 1,
            hasher_replay: HasherResponseReplay::default(),
            block_addr_replay: BlockAddressReplay::default(),
        }
    }

    /// Records the address assigned to a control block hash and advances the counter by one
    /// hash cycle.
    pub fn record_hash_control_block(&mut self) -> Felt {
        let block_addr = Felt::from_u32(self.addr);

        self.block_addr_replay.record_block_address(block_addr);
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION;

        block_addr
    }

    /// Records the address assigned to a basic block hash and advances the counter by one hash
    /// cycle per op batch.
    pub fn record_hash_basic_block(&mut self, basic_block_node: &BasicBlockNode) -> Felt {
        let block_addr = Felt::from_u32(self.addr);

        self.block_addr_replay.record_block_address(block_addr);
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION * basic_block_node.num_op_batches() as u32;

        block_addr
    }

    /// Records the output of a single hash permutation at the current address.
    pub fn record_permute_output(&mut self, hashed_state: [Felt; STATE_WIDTH]) {
        self.hasher_replay.record_permute(Felt::from_u32(self.addr), hashed_state);
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION;
    }

    /// Records the root computed while verifying a Merkle path, one hash cycle per node.
    pub fn record_build_merkle_root(&mut self, path: &MerklePath, computed_root: Word) {
        self.hasher_replay
            .record_build_merkle_root(Felt::from_u32(self.addr), computed_root);
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION * path.depth() as u32;
    }

    /// Records the old and new roots computed while updating a Merkle path.
    pub fn record_update_merkle_root(&mut self, path: &MerklePath, old_root: Word, new_root: Word) {
        self.hasher_replay
            .record_update_merkle_root(Felt::from_u32(self.addr), old_root, new_root);

        // A root update verifies both the old and the new path, so it consumes two hash cycles
        // per node in the path.
        self.addr += 2 * NUM_HASHER_ROWS_PER_PERMUTATION * path.depth() as u32;
    }

    /// Takes the replays accumulated since the last call, resetting them to empty.
    pub fn extract_replay(&mut self) -> (HasherResponseReplay, BlockAddressReplay) {
        (
            core::mem::take(&mut self.hasher_replay),
            core::mem::take(&mut self.block_addr_replay),
        )
    }
}

impl Default for HasherChipletShim {
    fn default() -> Self {
        Self::new()
    }
}
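
// A minimal test-only sketch of the shim's address bookkeeping, assuming an 8-row hash cycle
// (the assumed value of `HASH_CYCLE_LEN`): addresses start at 1, a control block or permutation
// consumes one cycle, a basic block consumes one cycle per op batch, and a Merkle root update
// consumes two cycles per node in the path.
#[cfg(test)]
mod hasher_addr_sketch {
    const CYCLE: u32 = 8; // assumed value of HASH_CYCLE_LEN

    #[test]
    fn addresses_advance_by_whole_cycles() {
        let mut addr = 1u32; // the hasher chiplet's first row
        addr += CYCLE; // one control block hash
        addr += CYCLE * 3; // a basic block with 3 op batches
        addr += 2 * CYCLE * 4; // update_merkle_root over a 4-node path
        assert_eq!(addr, 1 + 8 + 24 + 64);
    }
}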