use alloc::{sync::Arc, vec::Vec};

use miden_air::trace::chiplets::hasher::{HASH_CYCLE_LEN, HASH_CYCLE_LEN_FELT, STATE_WIDTH};

use super::{
    decoder::block_stack::{BlockInfo, BlockStack, BlockType, ExecutionContextInfo},
    stack::OverflowTable,
    trace_state::{
        AceReplay, AdviceReplay, BitwiseReplay, BlockAddressReplay, BlockStackReplay,
        CoreTraceFragmentContext, CoreTraceState, DecoderState, ExecutionContextReplay,
        ExecutionContextSystemInfo, ExecutionReplay, HasherRequestReplay, HasherResponseReplay,
        KernelReplay, MastForestResolutionReplay, MemoryReadsReplay, MemoryWritesReplay, NodeFlags,
        RangeCheckerReplay, StackOverflowReplay, StackState, SystemState,
    },
    utils::split_u32_into_u16,
};
use crate::{
    ContextId, EMPTY_WORD, FastProcessor, Felt, MIN_STACK_DEPTH, ONE, RowIndex, Word, ZERO,
    continuation_stack::{Continuation, ContinuationStack},
    crypto::merkle::MerklePath,
    field::{PrimeCharacteristicRing, PrimeField64},
    mast::{
        BasicBlockNode, JoinNode, LoopNode, MastForest, MastNode, MastNodeExt, MastNodeId,
        SplitNode,
    },
    precompile::PrecompileTranscript,
    processor::{Processor, StackInterface, SystemInterface},
    trace::chiplets::CircuitEvaluation,
    tracer::{OperationHelperRegisters, Tracer},
};

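/// Snapshot of the execution state taken at the start of a trace fragment.
///
/// Holds the core trace state at the fragment boundary together with the continuation stack and
/// the MAST forest that were active at that point, which is everything needed to resume execution
/// from the snapshot when the fragment's trace is generated.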
#[derive(Debug)]
struct StateSnapshot {
    state: CoreTraceState,
    continuation_stack: ContinuationStack,
    initial_mast_forest: Arc<MastForest>,
}

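/// All the information collected during execution from which the execution trace is built: the
/// per-fragment core trace contexts, the replays destined for the chiplets, the final precompile
/// transcript, and the fragment size used to split the trace.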
pub struct TraceGenerationContext {
    pub core_trace_contexts: Vec<CoreTraceFragmentContext>,

    pub range_checker_replay: RangeCheckerReplay,
    pub memory_writes: MemoryWritesReplay,
    pub bitwise_replay: BitwiseReplay,
    pub hasher_for_chiplet: HasherRequestReplay,
    pub kernel_replay: KernelReplay,
    pub ace_replay: AceReplay,

    pub final_pc_transcript: PrecompileTranscript,

    pub fragment_size: usize,
}

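/// A [`Tracer`] that records, while the [`FastProcessor`] executes a program, all the information
/// needed to build the execution trace after the fact.
///
/// Execution is split into fragments of `fragment_size` clock cycles. At each fragment boundary
/// the tracer snapshots the processor state; between boundaries it accumulates replays of hasher
/// responses, memory reads, advice pops, block stack updates, and so on, so that every fragment's
/// core trace can later be generated independently. It also accumulates the requests that will be
/// answered by the chiplets (hasher, bitwise, memory, kernel ROM, ACE) and the range checker.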
#[derive(Debug)]
pub struct ExecutionTracer {
    state_snapshot: Option<StateSnapshot>,

    overflow_table: OverflowTable,
    overflow_replay: StackOverflowReplay,

    block_stack: BlockStack,
    block_stack_replay: BlockStackReplay,
    execution_context_replay: ExecutionContextReplay,

    hasher_chiplet_shim: HasherChipletShim,
    memory_reads: MemoryReadsReplay,
    advice: AdviceReplay,
    external: MastForestResolutionReplay,

    range_checker: RangeCheckerReplay,
    memory_writes: MemoryWritesReplay,
    bitwise: BitwiseReplay,
    kernel: KernelReplay,
    hasher_for_chiplet: HasherRequestReplay,
    ace: AceReplay,

    fragment_contexts: Vec<CoreTraceFragmentContext>,

    fragment_size: usize,
}

impl ExecutionTracer {
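    /// Creates a new `ExecutionTracer` that starts a new fragment context every `fragment_size`
    /// clock cycles.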
    pub fn new(fragment_size: usize) -> Self {
        Self {
            state_snapshot: None,
            overflow_table: OverflowTable::default(),
            overflow_replay: StackOverflowReplay::default(),
            block_stack: BlockStack::default(),
            block_stack_replay: BlockStackReplay::default(),
            execution_context_replay: ExecutionContextReplay::default(),
            hasher_chiplet_shim: HasherChipletShim::default(),
            memory_reads: MemoryReadsReplay::default(),
            range_checker: RangeCheckerReplay::default(),
            memory_writes: MemoryWritesReplay::default(),
            advice: AdviceReplay::default(),
            bitwise: BitwiseReplay::default(),
            kernel: KernelReplay::default(),
            hasher_for_chiplet: HasherRequestReplay::default(),
            ace: AceReplay::default(),
            external: MastForestResolutionReplay::default(),
            fragment_contexts: Vec::new(),
            fragment_size,
        }
    }

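    /// Consumes the tracer and assembles the [`TraceGenerationContext`] from everything recorded
    /// during execution.
    ///
    /// The fragment context currently being recorded (if any) is finalized before the context is
    /// assembled.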
    pub fn into_trace_generation_context(
        mut self,
        final_pc_transcript: PrecompileTranscript,
    ) -> TraceGenerationContext {
        self.finish_current_fragment_context();

        TraceGenerationContext {
            core_trace_contexts: self.fragment_contexts,
            range_checker_replay: self.range_checker,
            memory_writes: self.memory_writes,
            bitwise_replay: self.bitwise,
            kernel_replay: self.kernel,
            hasher_for_chiplet: self.hasher_for_chiplet,
            ace_replay: self.ace,
            final_pc_transcript,
            fragment_size: self.fragment_size,
        }
    }

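    /// Finalizes the current fragment context (if any) and takes a snapshot of the execution
    /// state from which the next fragment will resume.
    ///
    /// The provided `continuation` is pushed back onto the continuation stack so that replaying
    /// from the snapshot starts exactly at the operation about to be executed.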
    fn start_new_fragment_context(
        &mut self,
        system_state: SystemState,
        stack_top: [Felt; MIN_STACK_DEPTH],
        mut continuation_stack: ContinuationStack,
        continuation: Continuation,
        current_forest: Arc<MastForest>,
    ) {
        self.finish_current_fragment_context();

        self.state_snapshot = {
            let decoder_state = {
                if self.block_stack.is_empty() {
                    DecoderState { current_addr: ZERO, parent_addr: ZERO }
                } else {
                    let block_info = self.block_stack.peek();

                    DecoderState {
                        current_addr: block_info.addr,
                        parent_addr: block_info.parent_addr,
                    }
                }
            };
            let stack = {
                let stack_depth =
                    MIN_STACK_DEPTH + self.overflow_table.num_elements_in_current_ctx();
                let last_overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
                StackState::new(stack_top, stack_depth, last_overflow_addr)
            };

            // Push the continuation back so that execution resumes at the operation that is
            // about to be executed.
            continuation_stack.push_continuation(continuation);

            Some(StateSnapshot {
                state: CoreTraceState {
                    system: system_state,
                    decoder: decoder_state,
                    stack,
                },
                continuation_stack,
                initial_mast_forest: current_forest,
            })
        };
    }

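    /// Records the start of a control node: issues the node's hash request for the hasher
    /// chiplet, pushes the node onto the block stack, and records the resulting parent address
    /// for the block stack replay.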
    fn record_control_node_start<P: Processor>(
        &mut self,
        node: &MastNode,
        processor: &P,
        current_forest: &MastForest,
    ) {
        let (ctx_info, block_type) = match node {
            MastNode::Join(node) => {
                let child1_hash = current_forest
                    .get_node_by_id(node.first())
                    .expect("join node's first child expected to be in the forest")
                    .digest();
                let child2_hash = current_forest
                    .get_node_by_id(node.second())
                    .expect("join node's second child expected to be in the forest")
                    .digest();
                self.hasher_for_chiplet.record_hash_control_block(
                    child1_hash,
                    child2_hash,
                    JoinNode::DOMAIN,
                    node.digest(),
                );

                (None, BlockType::Join(false))
            },
            MastNode::Split(node) => {
                let child1_hash = current_forest
                    .get_node_by_id(node.on_true())
                    .expect("split node's true child expected to be in the forest")
                    .digest();
                let child2_hash = current_forest
                    .get_node_by_id(node.on_false())
                    .expect("split node's false child expected to be in the forest")
                    .digest();
                self.hasher_for_chiplet.record_hash_control_block(
                    child1_hash,
                    child2_hash,
                    SplitNode::DOMAIN,
                    node.digest(),
                );

                (None, BlockType::Split)
            },
            MastNode::Loop(node) => {
                let body_hash = current_forest
                    .get_node_by_id(node.body())
                    .expect("loop node's body expected to be in the forest")
                    .digest();

                self.hasher_for_chiplet.record_hash_control_block(
                    body_hash,
                    EMPTY_WORD,
                    LoopNode::DOMAIN,
                    node.digest(),
                );

                let loop_entered = {
                    let condition = processor.stack().get(0);
                    condition == ONE
                };

                (None, BlockType::Loop(loop_entered))
            },
            MastNode::Call(node) => {
                let callee_hash = current_forest
                    .get_node_by_id(node.callee())
                    .expect("call node's callee expected to be in the forest")
                    .digest();

                self.hasher_for_chiplet.record_hash_control_block(
                    callee_hash,
                    EMPTY_WORD,
                    node.domain(),
                    node.digest(),
                );

                let exec_ctx = {
                    let overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
                    ExecutionContextInfo::new(
                        processor.system().ctx(),
                        processor.system().caller_hash(),
                        processor.stack().depth(),
                        overflow_addr,
                    )
                };
                let block_type = if node.is_syscall() {
                    BlockType::SysCall
                } else {
                    BlockType::Call
                };

                (Some(exec_ctx), block_type)
            },
            MastNode::Dyn(dyn_node) => {
                self.hasher_for_chiplet.record_hash_control_block(
                    EMPTY_WORD,
                    EMPTY_WORD,
                    dyn_node.domain(),
                    dyn_node.digest(),
                );

                if dyn_node.is_dyncall() {
                    let exec_ctx = {
                        let overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
                        // DYNCALL drops the top stack element (the memory address of the callee
                        // hash) before switching context, so record the depth after that drop.
                        let stack_depth_after_drop = processor.stack().depth() - 1;
                        ExecutionContextInfo::new(
                            processor.system().ctx(),
                            processor.system().caller_hash(),
                            stack_depth_after_drop,
                            overflow_addr,
                        )
                    };
                    (Some(exec_ctx), BlockType::Dyncall)
                } else {
                    (None, BlockType::Dyn)
                }
            },
            MastNode::Block(_) => panic!(
                "`ExecutionTracer::record_basic_block_start()` must be called instead for basic blocks"
            ),
            MastNode::External(_) => panic!(
                "external nodes are guaranteed to be resolved before `record_control_node_start()` is called"
            ),
        };

        let block_addr = self.hasher_chiplet_shim.record_hash_control_block();
        let parent_addr = self.block_stack.push(block_addr, block_type, ctx_info);
        self.block_stack_replay.record_node_start_parent_addr(parent_addr);
    }

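    /// Records the end of a node for the block stack replay: the node's address and flags,
    /// together with the address and parent address of the block that becomes active once the
    /// node is popped.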
    fn record_node_end(&mut self, block_info: &BlockInfo) {
        let flags = NodeFlags::new(
            block_info.is_loop_body() == ONE,
            block_info.is_entered_loop() == ONE,
            block_info.is_call() == ONE,
            block_info.is_syscall() == ONE,
        );
        let (prev_addr, prev_parent_addr) = if self.block_stack.is_empty() {
            (ZERO, ZERO)
        } else {
            let prev_block = self.block_stack.peek();
            (prev_block.addr, prev_block.parent_addr)
        };
        self.block_stack_replay.record_node_end(
            block_info.addr,
            flags,
            prev_addr,
            prev_parent_addr,
        );
    }

    fn record_execution_context(&mut self, ctx_info: ExecutionContextSystemInfo) {
        self.execution_context_replay.record_execution_context(ctx_info);
    }

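    /// Packages everything recorded since the last state snapshot into a
    /// [`CoreTraceFragmentContext`] and stores it, leaving the tracer ready to record the next
    /// fragment.
    ///
    /// Does nothing if no snapshot was taken (i.e. no fragment is currently being recorded).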
    fn finish_current_fragment_context(&mut self) {
        if let Some(snapshot) = self.state_snapshot.take() {
            let (hasher_replay, block_addr_replay) = self.hasher_chiplet_shim.extract_replay();
            let memory_reads_replay = core::mem::take(&mut self.memory_reads);
            let advice_replay = core::mem::take(&mut self.advice);
            let external_replay = core::mem::take(&mut self.external);
            let stack_overflow_replay = core::mem::take(&mut self.overflow_replay);
            let block_stack_replay = core::mem::take(&mut self.block_stack_replay);
            let execution_context_replay = core::mem::take(&mut self.execution_context_replay);

            let trace_state = CoreTraceFragmentContext {
                state: snapshot.state,
                replay: ExecutionReplay {
                    hasher: hasher_replay,
                    block_address: block_addr_replay,
                    memory_reads: memory_reads_replay,
                    advice: advice_replay,
                    mast_forest_resolution: external_replay,
                    stack_overflow: stack_overflow_replay,
                    block_stack: block_stack_replay,
                    execution_context: execution_context_replay,
                },
                continuation: snapshot.continuation_stack,
                initial_mast_forest: snapshot.initial_mast_forest,
            };

            self.fragment_contexts.push(trace_state);
        }
    }
}

impl Tracer for ExecutionTracer {
    type Processor = FastProcessor;

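    /// Called at the start of every clock cycle. Starts a new fragment context at fragment
    /// boundaries, and records the decoder bookkeeping required by the continuation that is
    /// about to be executed.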
    fn start_clock_cycle(
        &mut self,
        processor: &FastProcessor,
        continuation: Continuation,
        continuation_stack: &ContinuationStack,
        current_forest: &Arc<MastForest>,
    ) {
        if processor.system().clock().as_usize().is_multiple_of(self.fragment_size) {
            self.start_new_fragment_context(
                SystemState::from_processor(processor),
                processor
                    .stack_top()
                    .try_into()
                    .expect("stack_top expected to be MIN_STACK_DEPTH elements"),
                continuation_stack.clone(),
                continuation.clone(),
                current_forest.clone(),
            );
        }

        match continuation {
            Continuation::ResumeBasicBlock { .. } => {
                // No block stack bookkeeping is needed when resuming inside a basic block.
            },
            Continuation::StartNode(mast_node_id) => match &current_forest[mast_node_id] {
                MastNode::Join(_)
                | MastNode::Split(_)
                | MastNode::Loop(_)
                | MastNode::Dyn(_)
                | MastNode::Call(_) => {
                    self.record_control_node_start(
                        &current_forest[mast_node_id],
                        processor,
                        current_forest,
                    );
                },
                MastNode::Block(basic_block_node) => {
                    self.hasher_for_chiplet.record_hash_basic_block(
                        current_forest.clone(),
                        mast_node_id,
                        basic_block_node.digest(),
                    );
                    let block_addr =
                        self.hasher_chiplet_shim.record_hash_basic_block(basic_block_node);
                    let parent_addr =
                        self.block_stack.push(block_addr, BlockType::BasicBlock, None);
                    self.block_stack_replay.record_node_start_parent_addr(parent_addr);
                },
                MastNode::External(_) => unreachable!(
                    "start_clock_cycle is guaranteed not to be called on external nodes"
                ),
            },
            Continuation::Respan { node_id: _, batch_index: _ } => {
                // A RESPAN moves the decoder to the next operation batch, which occupies the
                // next hash cycle in the hasher chiplet.
                self.block_stack.peek_mut().addr += HASH_CYCLE_LEN_FELT;
            },
            Continuation::FinishLoop { node_id: _, was_entered }
                if was_entered && processor.stack_get(0) == ONE =>
            {
                // The loop condition is still true, so the loop body repeats and the block
                // remains on the block stack.
            },
            Continuation::FinishJoin(_)
            | Continuation::FinishSplit(_)
            | Continuation::FinishCall(_)
            | Continuation::FinishDyn(_)
            | Continuation::FinishLoop { .. }
            | Continuation::FinishBasicBlock(_) => {
                let block_info = self.block_stack.pop();
                self.record_node_end(&block_info);

                if let Some(ctx_info) = block_info.ctx_info {
                    self.record_execution_context(ExecutionContextSystemInfo {
                        parent_ctx: ctx_info.parent_ctx,
                        parent_fn_hash: ctx_info.parent_fn_hash,
                    });
                }
            },
            Continuation::FinishExternal(_)
            | Continuation::EnterForest(_)
            | Continuation::AfterExitDecorators(_)
            | Continuation::AfterExitDecoratorsBasicBlock(_) => {
                panic!(
                    "FinishExternal, EnterForest, AfterExitDecorators and AfterExitDecoratorsBasicBlock continuations are guaranteed not to be passed here"
                )
            },
        }
    }

    fn record_mast_forest_resolution(&mut self, node_id: MastNodeId, forest: &Arc<MastForest>) {
        self.external.record_resolution(node_id, forest.clone());
    }

    fn record_hasher_permute(
        &mut self,
        input_state: [Felt; STATE_WIDTH],
        output_state: [Felt; STATE_WIDTH],
    ) {
        self.hasher_for_chiplet.record_permute_input(input_state);
        self.hasher_chiplet_shim.record_permute_output(output_state);
    }

    fn record_hasher_build_merkle_root(
        &mut self,
        node: Word,
        path: Option<&MerklePath>,
        index: Felt,
        output_root: Word,
    ) {
        let path = path.expect("execution tracer expects a valid Merkle path");
        self.hasher_chiplet_shim.record_build_merkle_root(path, output_root);
        self.hasher_for_chiplet.record_build_merkle_root(node, path.clone(), index);
    }

    fn record_hasher_update_merkle_root(
        &mut self,
        old_value: Word,
        new_value: Word,
        path: Option<&MerklePath>,
        index: Felt,
        old_root: Word,
        new_root: Word,
    ) {
        let path = path.expect("execution tracer expects a valid Merkle path");
        self.hasher_chiplet_shim.record_update_merkle_root(path, old_root, new_root);
        self.hasher_for_chiplet.record_update_merkle_root(
            old_value,
            new_value,
            path.clone(),
            index,
        );
    }

    fn record_memory_read_element(
        &mut self,
        element: Felt,
        addr: Felt,
        ctx: ContextId,
        clk: RowIndex,
    ) {
        self.memory_reads.record_read_element(element, addr, ctx, clk);
    }

    fn record_memory_read_word(&mut self, word: Word, addr: Felt, ctx: ContextId, clk: RowIndex) {
        self.memory_reads.record_read_word(word, addr, ctx, clk);
    }

    fn record_memory_write_element(
        &mut self,
        element: Felt,
        addr: Felt,
        ctx: ContextId,
        clk: RowIndex,
    ) {
        self.memory_writes.record_write_element(element, addr, ctx, clk);
    }

    fn record_memory_write_word(&mut self, word: Word, addr: Felt, ctx: ContextId, clk: RowIndex) {
        self.memory_writes.record_write_word(word, addr, ctx, clk);
    }

    fn record_advice_pop_stack(&mut self, value: Felt) {
        self.advice.record_pop_stack(value);
    }

    fn record_advice_pop_stack_word(&mut self, word: Word) {
        self.advice.record_pop_stack_word(word);
    }

    fn record_advice_pop_stack_dword(&mut self, words: [Word; 2]) {
        self.advice.record_pop_stack_dword(words);
    }

    fn record_u32and(&mut self, a: Felt, b: Felt) {
        self.bitwise.record_u32and(a, b);
    }

    fn record_u32xor(&mut self, a: Felt, b: Felt) {
        self.bitwise.record_u32xor(a, b);
    }

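    /// Decomposes `u32_lo` and `u32_hi` into their 16-bit limbs and records the four limbs with
    /// the range checker for this clock cycle.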
    fn record_u32_range_checks(&mut self, clk: RowIndex, u32_lo: Felt, u32_hi: Felt) {
        let (t1, t0) = split_u32_into_u16(u32_lo.as_canonical_u64());
        let (t3, t2) = split_u32_into_u16(u32_hi.as_canonical_u64());

        self.range_checker.record_range_check_u32(clk, [t0, t1, t2, t3]);
    }

    fn record_kernel_proc_access(&mut self, proc_hash: Word) {
        self.kernel.record_kernel_proc_access(proc_hash);
    }

    fn record_circuit_evaluation(&mut self, circuit_evaluation: CircuitEvaluation) {
        self.ace.record_circuit_evaluation(circuit_evaluation);
    }

    fn finalize_clock_cycle(
        &mut self,
        _processor: &FastProcessor,
        _op_helper_registers: OperationHelperRegisters,
        _current_forest: &Arc<MastForest>,
    ) {
        // Nothing to record at the end of a clock cycle.
    }

    fn increment_stack_size(&mut self, processor: &FastProcessor) {
        // The element shifted past the top 16 stack positions moves into the overflow table.
        let new_overflow_value = processor.stack_get(16);
        self.overflow_table.push(new_overflow_value, processor.system().clock());
    }

    fn decrement_stack_size(&mut self) {
        // Only record a replay entry if the overflow table had an element to shift back in.
        if let Some(popped_value) = self.overflow_table.pop() {
            let new_overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
            self.overflow_replay.record_pop_overflow(popped_value, new_overflow_addr);
        }
    }

    fn start_context(&mut self) {
        self.overflow_table.start_context();
    }

    fn restore_context(&mut self) {
        self.overflow_table.restore_context();
        self.overflow_replay.record_restore_context_overflow_addr(
            MIN_STACK_DEPTH + self.overflow_table.num_elements_in_current_ctx(),
            self.overflow_table.last_update_clk_in_current_ctx(),
        );
    }
}

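/// Number of rows that a single permutation of the hash function occupies in the hasher
/// chiplet's trace (one full hash cycle).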
const NUM_HASHER_ROWS_PER_PERMUTATION: u32 = HASH_CYCLE_LEN as u32;

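/// Mirrors the hasher chiplet's address bookkeeping during execution, without computing any
/// hashes.
///
/// Every recorded operation advances the next-row address by the number of rows that operation
/// will occupy in the hasher chiplet's trace, so the addresses recorded for replay match those
/// the chiplet will produce during trace generation.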
#[derive(Debug)]
pub struct HasherChipletShim {
    /// Address of the next unused row in the hasher chiplet's trace.
    addr: u32,
    hasher_replay: HasherResponseReplay,
    block_addr_replay: BlockAddressReplay,
}

impl HasherChipletShim {
    pub fn new() -> Self {
        Self {
            // Hasher row addresses are 1-based: the first row of the chiplet's trace has
            // address 1.
            addr: 1,
            hasher_replay: HasherResponseReplay::default(),
            block_addr_replay: BlockAddressReplay::default(),
        }
    }

    pub fn record_hash_control_block(&mut self) -> Felt {
        let block_addr = Felt::from_u32(self.addr);

        self.block_addr_replay.record_block_address(block_addr);
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION;

        block_addr
    }

    pub fn record_hash_basic_block(&mut self, basic_block_node: &BasicBlockNode) -> Felt {
        let block_addr = Felt::from_u32(self.addr);

        self.block_addr_replay.record_block_address(block_addr);
        // Hashing a basic block takes one permutation per operation batch.
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION * basic_block_node.num_op_batches() as u32;

        block_addr
    }

    pub fn record_permute_output(&mut self, hashed_state: [Felt; STATE_WIDTH]) {
        self.hasher_replay.record_permute(Felt::from_u32(self.addr), hashed_state);
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION;
    }

    pub fn record_build_merkle_root(&mut self, path: &MerklePath, computed_root: Word) {
        self.hasher_replay
            .record_build_merkle_root(Felt::from_u32(self.addr), computed_root);
        // Verifying a Merkle path takes one permutation per level of the path.
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION * path.depth() as u32;
    }

    pub fn record_update_merkle_root(&mut self, path: &MerklePath, old_root: Word, new_root: Word) {
        self.hasher_replay
            .record_update_merkle_root(Felt::from_u32(self.addr), old_root, new_root);

        // Updating a Merkle root verifies both the old and the new path, hence twice the rows.
        self.addr += 2 * NUM_HASHER_ROWS_PER_PERMUTATION * path.depth() as u32;
    }

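    /// Takes the accumulated hasher response and block address replays out of the shim, leaving
    /// empty replays behind for the next fragment.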
    pub fn extract_replay(&mut self) -> (HasherResponseReplay, BlockAddressReplay) {
        (
            core::mem::take(&mut self.hasher_replay),
            core::mem::take(&mut self.block_addr_replay),
        )
    }
}

impl Default for HasherChipletShim {
    fn default() -> Self {
        Self::new()
    }
}