use alloc::{sync::Arc, vec::Vec};

use miden_air::trace::{chiplets::hasher::STATE_WIDTH, rows::RowIndex};
use miden_core::{
    EMPTY_WORD, Felt, ONE, Word, ZERO,
    crypto::merkle::MerklePath,
    mast::{
        BasicBlockNode, JoinNode, LoopNode, MastForest, MastNode, MastNodeExt, MastNodeId,
        SplitNode,
    },
    precompile::PrecompileTranscript,
    stack::MIN_STACK_DEPTH,
};

use crate::{
    chiplets::CircuitEvaluation,
    continuation_stack::ContinuationStack,
    decoder::block_stack::{BlockInfo, BlockStack, BlockType, ExecutionContextInfo},
    fast::{
        FastProcessor,
        trace_state::{
            AceReplay, AdviceReplay, BitwiseReplay, BlockStackReplay, CoreTraceFragmentContext,
            CoreTraceState, DecoderState, ExecutionContextSystemInfo, ExecutionReplay,
            HasherRequestReplay, HasherResponseReplay, KernelReplay, MastForestResolutionReplay,
            MemoryReadsReplay, MemoryWritesReplay, NodeExecutionState, NodeFlags,
            RangeCheckerReplay, StackOverflowReplay, StackState, SystemState,
        },
        tracer::Tracer,
    },
    stack::OverflowTable,
    system::ContextId,
    utils::split_u32_into_u16,
};

const HASH_CYCLE_LEN: Felt = Felt::new(miden_air::trace::chiplets::hasher::HASH_CYCLE_LEN as u64);

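/// A snapshot of the execution state captured at the start of a trace fragment, from which
/// generation of that fragment can later be resumed.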
#[derive(Debug)]
struct StateSnapshot {
    state: CoreTraceState,
    continuation_stack: ContinuationStack,
    execution_state: NodeExecutionState,
    initial_mast_forest: Arc<MastForest>,
}

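/// All the data accumulated during execution that is needed to generate the execution trace,
/// produced by [`ExecutionTracer::into_trace_generation_context`].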
pub struct TraceGenerationContext {
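    /// One context per core trace fragment, in the order in which the fragments were executed.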
    pub core_trace_contexts: Vec<CoreTraceFragmentContext>,

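    // Replay data for the range checker and the chiplets, recorded over the whole execution.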
    pub range_checker_replay: RangeCheckerReplay,
    pub memory_writes: MemoryWritesReplay,
    pub bitwise_replay: BitwiseReplay,
    pub hasher_for_chiplet: HasherRequestReplay,
    pub kernel_replay: KernelReplay,
    pub ace_replay: AceReplay,

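    /// The precompile transcript as it stands at the end of execution.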
    pub final_pc_transcript: PrecompileTranscript,

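    /// The number of clock cycles covered by each core trace fragment.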
    pub fragment_size: usize,
}

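/// A [`Tracer`] that records, during execution on the [`FastProcessor`], all the information
/// needed to later generate the execution trace fragment by fragment.
///
/// Every `fragment_size` clock cycles, the tracer snapshots the processor state and starts a new
/// fragment context. Within a fragment, it records the responses that the core trace fragment
/// generators will need to replay (hasher responses, memory reads, advice pops, stack overflow
/// and block stack events, MAST forest resolutions), while requests destined for the range
/// checker and the chiplets are accumulated over the whole execution.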
#[derive(Debug)]
pub struct ExecutionTracer {
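    /// Snapshot taken when the current fragment began; `None` before the first fragment starts.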
    state_snapshot: Option<StateSnapshot>,

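    // The stack overflow table, together with a replay of the values popped from it.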
    pub overflow_table: OverflowTable,
    pub overflow_replay: StackOverflowReplay,

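    // The decoder block stack, together with a replay of node start/end events.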
    pub block_stack: BlockStack,
    pub block_stack_replay: BlockStackReplay,

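    // Responses recorded during execution and replayed to the core trace fragment generators.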
    pub hasher_chiplet_shim: HasherChipletShim,
    pub memory_reads: MemoryReadsReplay,
    pub advice: AdviceReplay,
    pub external: MastForestResolutionReplay,

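    // Requests recorded during execution and replayed when generating the chiplet traces.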
    pub range_checker: RangeCheckerReplay,
    pub memory_writes: MemoryWritesReplay,
    pub bitwise: BitwiseReplay,
    pub kernel: KernelReplay,
    pub hasher_for_chiplet: HasherRequestReplay,
    pub ace: AceReplay,

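    /// Contexts for all fragments completed so far.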
    fragment_contexts: Vec<CoreTraceFragmentContext>,

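    /// The number of clock cycles after which a new fragment context is started.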
    fragment_size: usize,
}

impl ExecutionTracer {
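    /// Creates a new `ExecutionTracer` that starts a new fragment context every `fragment_size`
    /// clock cycles.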
    pub fn new(fragment_size: usize) -> Self {
        Self {
            state_snapshot: None,
            overflow_table: OverflowTable::default(),
            overflow_replay: StackOverflowReplay::default(),
            block_stack: BlockStack::default(),
            block_stack_replay: BlockStackReplay::default(),
            hasher_chiplet_shim: HasherChipletShim::default(),
            memory_reads: MemoryReadsReplay::default(),
            range_checker: RangeCheckerReplay::default(),
            memory_writes: MemoryWritesReplay::default(),
            advice: AdviceReplay::default(),
            bitwise: BitwiseReplay::default(),
            kernel: KernelReplay::default(),
            hasher_for_chiplet: HasherRequestReplay::default(),
            ace: AceReplay::default(),
            external: MastForestResolutionReplay::default(),
            fragment_contexts: Vec::new(),
            fragment_size,
        }
    }

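    /// Consumes the tracer and returns the [`TraceGenerationContext`] accumulated during
    /// execution, finishing the fragment context that is currently in progress.
    ///
    /// `final_pc_transcript` is the precompile transcript at the end of execution.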
    pub fn into_trace_generation_context(
        mut self,
        final_pc_transcript: PrecompileTranscript,
    ) -> TraceGenerationContext {
        self.finish_current_fragment_context();

        TraceGenerationContext {
            core_trace_contexts: self.fragment_contexts,
            range_checker_replay: self.range_checker,
            memory_writes: self.memory_writes,
            bitwise_replay: self.bitwise,
            kernel_replay: self.kernel,
            hasher_for_chiplet: self.hasher_for_chiplet,
            ace_replay: self.ace,
            final_pc_transcript,
            fragment_size: self.fragment_size,
        }
    }

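    /// Finishes the current fragment context (if any) and takes a snapshot of the processor state
    /// from which the next fragment will be generated.
    ///
    /// The snapshot captures the system state, the decoder state derived from the block stack,
    /// the stack state (including overflow information), the continuation stack, and the MAST
    /// forest that is current at the fragment boundary.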
    fn start_new_fragment_context(
        &mut self,
        system_state: SystemState,
        stack_top: [Felt; MIN_STACK_DEPTH],
        continuation_stack: ContinuationStack,
        execution_state: NodeExecutionState,
        current_forest: Arc<MastForest>,
    ) {
        self.finish_current_fragment_context();

        self.state_snapshot = {
            let decoder_state = {
                if self.block_stack.is_empty() {
                    DecoderState { current_addr: ZERO, parent_addr: ZERO }
                } else {
                    let block_info = self.block_stack.peek();

                    DecoderState {
                        current_addr: block_info.addr,
                        parent_addr: block_info.parent_addr,
                    }
                }
            };
            let stack = {
                let stack_depth =
                    MIN_STACK_DEPTH + self.overflow_table.num_elements_in_current_ctx();
                let last_overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
                StackState::new(stack_top, stack_depth, last_overflow_addr)
            };

            Some(StateSnapshot {
                state: CoreTraceState {
                    system: system_state,
                    decoder: decoder_state,
                    stack,
                },
                continuation_stack,
                execution_state,
                initial_mast_forest: current_forest,
            })
        };
    }

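    /// Records the decoder and hasher work performed when a control node starts executing: the
    /// hash request for the node's digest, the block address allocated by the hasher chiplet, and
    /// the push onto the block stack (with execution context info for CALL, SYSCALL and DYNCALL
    /// nodes).
    ///
    /// Panics if called on a basic block or external node; those are handled separately.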
    fn record_control_node_start(
        &mut self,
        node: &MastNode,
        processor: &FastProcessor,
        current_forest: &MastForest,
    ) {
        let (ctx_info, block_type) = match node {
            MastNode::Join(node) => {
                let child1_hash = current_forest
                    .get_node_by_id(node.first())
                    .expect("join node's first child expected to be in the forest")
                    .digest();
                let child2_hash = current_forest
                    .get_node_by_id(node.second())
                    .expect("join node's second child expected to be in the forest")
                    .digest();
                self.hasher_for_chiplet.record_hash_control_block(
                    child1_hash,
                    child2_hash,
                    JoinNode::DOMAIN,
                    node.digest(),
                );

                (None, BlockType::Join(false))
            },
            MastNode::Split(node) => {
                let child1_hash = current_forest
                    .get_node_by_id(node.on_true())
                    .expect("split node's true child expected to be in the forest")
                    .digest();
                let child2_hash = current_forest
                    .get_node_by_id(node.on_false())
                    .expect("split node's false child expected to be in the forest")
                    .digest();
                self.hasher_for_chiplet.record_hash_control_block(
                    child1_hash,
                    child2_hash,
                    SplitNode::DOMAIN,
                    node.digest(),
                );

                (None, BlockType::Split)
            },
            MastNode::Loop(node) => {
                let body_hash = current_forest
                    .get_node_by_id(node.body())
                    .expect("loop node's body expected to be in the forest")
                    .digest();

                self.hasher_for_chiplet.record_hash_control_block(
                    body_hash,
                    EMPTY_WORD,
                    LoopNode::DOMAIN,
                    node.digest(),
                );

                let loop_entered = {
                    let condition = processor.stack_get(0);
                    condition == ONE
                };

                (None, BlockType::Loop(loop_entered))
            },
            MastNode::Call(node) => {
                let callee_hash = current_forest
                    .get_node_by_id(node.callee())
                    .expect("call node's callee expected to be in the forest")
                    .digest();

                self.hasher_for_chiplet.record_hash_control_block(
                    callee_hash,
                    EMPTY_WORD,
                    node.domain(),
                    node.digest(),
                );

                let exec_ctx = {
                    let overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
                    ExecutionContextInfo::new(
                        processor.ctx,
                        processor.caller_hash,
                        processor.stack_depth(),
                        overflow_addr,
                    )
                };
                let block_type = if node.is_syscall() {
                    BlockType::SysCall
                } else {
                    BlockType::Call
                };

                (Some(exec_ctx), block_type)
            },
            MastNode::Dyn(dyn_node) => {
                self.hasher_for_chiplet.record_hash_control_block(
                    EMPTY_WORD,
                    EMPTY_WORD,
                    dyn_node.domain(),
                    dyn_node.digest(),
                );

                if dyn_node.is_dyncall() {
                    let exec_ctx = {
                        let overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
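                        // DYNCALL drops the top stack element (the memory address of the callee
                        // hash) before entering the new context, so the recorded depth is the
                        // depth after that drop.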
                        let stack_depth_after_drop = processor.stack_depth() - 1;
                        ExecutionContextInfo::new(
                            processor.ctx,
                            processor.caller_hash,
                            stack_depth_after_drop,
                            overflow_addr,
                        )
                    };
                    (Some(exec_ctx), BlockType::Dyncall)
                } else {
                    (None, BlockType::Dyn)
                }
            },
            MastNode::Block(_) => panic!(
                "`ExecutionTracer::record_basic_block_start()` must be called instead for basic blocks"
            ),
            MastNode::External(_) => panic!(
                "External nodes are guaranteed to be resolved before record_control_node_start is called"
            ),
        };

        let block_addr = self.hasher_chiplet_shim.record_hash_control_block();
        let parent_addr = self.block_stack.push(block_addr, block_type, ctx_info);
        self.block_stack_replay.record_node_start(parent_addr);
    }

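    /// Records the end of a node: the flags describing the block being exited, together with the
    /// address and parent address of the block that becomes current afterwards.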
    fn record_node_end(&mut self, block_info: &BlockInfo) {
        let flags = NodeFlags::new(
            block_info.is_loop_body() == ONE,
            block_info.is_entered_loop() == ONE,
            block_info.is_call() == ONE,
            block_info.is_syscall() == ONE,
        );
        let (prev_addr, prev_parent_addr) = if self.block_stack.is_empty() {
            (ZERO, ZERO)
        } else {
            let prev_block = self.block_stack.peek();
            (prev_block.addr, prev_block.parent_addr)
        };
        self.block_stack_replay.record_node_end(
            block_info.addr,
            flags,
            prev_addr,
            prev_parent_addr,
        );
    }

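    /// Records the execution context that must be restored when execution returns from a CALL,
    /// SYSCALL or DYNCALL node.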
    fn record_execution_context(&mut self, ctx_info: ExecutionContextSystemInfo) {
        self.block_stack_replay.record_execution_context(ctx_info);
    }

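    /// Closes the fragment context currently in progress, if any.
    ///
    /// The snapshot taken at the start of the fragment is combined with all the replay data
    /// recorded since then into a [`CoreTraceFragmentContext`], and the per-fragment replays are
    /// reset for the next fragment.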
    fn finish_current_fragment_context(&mut self) {
        if let Some(snapshot) = self.state_snapshot.take() {
            let hasher_replay = self.hasher_chiplet_shim.extract_replay();
            let memory_reads_replay = core::mem::take(&mut self.memory_reads);
            let advice_replay = core::mem::take(&mut self.advice);
            let external_replay = core::mem::take(&mut self.external);
            let stack_overflow_replay = core::mem::take(&mut self.overflow_replay);
            let block_stack_replay = core::mem::take(&mut self.block_stack_replay);

            let trace_state = CoreTraceFragmentContext {
                state: snapshot.state,
                replay: ExecutionReplay {
                    hasher: hasher_replay,
                    memory_reads: memory_reads_replay,
                    advice: advice_replay,
                    mast_forest_resolution: external_replay,
                    stack_overflow: stack_overflow_replay,
                    block_stack: block_stack_replay,
                },
                continuation: snapshot.continuation_stack,
                initial_execution_state: snapshot.execution_state,
                initial_mast_forest: snapshot.initial_mast_forest,
            };

            self.fragment_contexts.push(trace_state);
        }
    }
}

impl Tracer for ExecutionTracer {
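    /// Starts a new fragment context whenever the clock cycle is a multiple of the fragment size,
    /// then records the decoder and hasher chiplet bookkeeping implied by the current node
    /// execution state.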
    fn start_clock_cycle(
        &mut self,
        processor: &FastProcessor,
        execution_state: NodeExecutionState,
        continuation_stack: &mut ContinuationStack,
        current_forest: &Arc<MastForest>,
    ) {
        if processor.clk.as_usize().is_multiple_of(self.fragment_size) {
            self.start_new_fragment_context(
                SystemState::from_processor(processor),
                processor
                    .stack_top()
                    .try_into()
                    .expect("stack_top expected to be MIN_STACK_DEPTH elements"),
                continuation_stack.clone(),
                execution_state.clone(),
                current_forest.clone(),
            );
        }

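        // Record decoder and hasher chiplet bookkeeping based on where we are in the MAST.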
        match &execution_state {
            NodeExecutionState::BasicBlock { .. } => {
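                // Operations inside a basic block are recorded through the other `Tracer`
                // methods, so there is nothing to record here.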
            },
            NodeExecutionState::Start(mast_node_id) => match &current_forest[*mast_node_id] {
                MastNode::Join(_)
                | MastNode::Split(_)
                | MastNode::Loop(_)
                | MastNode::Dyn(_)
                | MastNode::Call(_) => {
                    self.record_control_node_start(
                        &current_forest[*mast_node_id],
                        processor,
                        current_forest,
                    );
                },
                MastNode::Block(basic_block_node) => {
                    self.hasher_for_chiplet.record_hash_basic_block(
                        basic_block_node.op_batches().to_vec(),
                        basic_block_node.digest(),
                    );
                    let block_addr =
                        self.hasher_chiplet_shim.record_hash_basic_block(basic_block_node);
                    let parent_addr =
                        self.block_stack.push(block_addr, BlockType::BasicBlock, None);
                    self.block_stack_replay.record_node_start(parent_addr);
                },
                MastNode::External(_) => unreachable!(
                    "start_clock_cycle is guaranteed not to be called on external nodes"
                ),
            },
            NodeExecutionState::Respan { node_id: _, batch_index: _ } => {
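                // A RESPAN moves the hasher chiplet to the next operation batch, which occupies
                // the next hash cycle, so the block address advances by one cycle length.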
                self.block_stack.peek_mut().addr += HASH_CYCLE_LEN;
            },
            NodeExecutionState::LoopRepeat(_) => {
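                // A REPEAT re-enters the loop body without changing the block stack, so there is
                // nothing to record here.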
            },
            NodeExecutionState::End(_) => {
                let block_info = self.block_stack.pop();
                self.record_node_end(&block_info);

                if let Some(ctx_info) = block_info.ctx_info {
                    self.record_execution_context(ExecutionContextSystemInfo {
                        parent_ctx: ctx_info.parent_ctx,
                        parent_fn_hash: ctx_info.parent_fn_hash,
                    });
                }
            },
        }
    }

    fn record_mast_forest_resolution(&mut self, node_id: MastNodeId, forest: &Arc<MastForest>) {
        self.external.record_resolution(node_id, forest.clone());
    }

    fn record_hasher_permute(
        &mut self,
        input_state: [Felt; STATE_WIDTH],
        output_state: [Felt; STATE_WIDTH],
    ) {
        self.hasher_for_chiplet.record_permute_input(input_state);
        self.hasher_chiplet_shim.record_permute_output(output_state);
    }

    fn record_hasher_build_merkle_root(
        &mut self,
        node: Word,
        path: Option<&MerklePath>,
        index: Felt,
        output_root: Word,
    ) {
        let path = path.expect("execution tracer expects a valid Merkle path");
        self.hasher_chiplet_shim.record_build_merkle_root(path, output_root);
        self.hasher_for_chiplet.record_build_merkle_root(node, path.clone(), index);
    }

    fn record_hasher_update_merkle_root(
        &mut self,
        old_value: Word,
        new_value: Word,
        path: Option<&MerklePath>,
        index: Felt,
        old_root: Word,
        new_root: Word,
    ) {
        let path = path.expect("execution tracer expects a valid Merkle path");
        self.hasher_chiplet_shim.record_update_merkle_root(path, old_root, new_root);
        self.hasher_for_chiplet.record_update_merkle_root(
            old_value,
            new_value,
            path.clone(),
            index,
        );
    }

    fn record_memory_read_element(
        &mut self,
        element: Felt,
        addr: Felt,
        ctx: ContextId,
        clk: RowIndex,
    ) {
        self.memory_reads.record_read_element(element, addr, ctx, clk);
    }

    fn record_memory_read_word(&mut self, word: Word, addr: Felt, ctx: ContextId, clk: RowIndex) {
        self.memory_reads.record_read_word(word, addr, ctx, clk);
    }

    fn record_memory_write_element(
        &mut self,
        element: Felt,
        addr: Felt,
        ctx: ContextId,
        clk: RowIndex,
    ) {
        self.memory_writes.record_write_element(element, addr, ctx, clk);
    }

    fn record_memory_write_word(&mut self, word: Word, addr: Felt, ctx: ContextId, clk: RowIndex) {
        self.memory_writes.record_write_word(word, addr, ctx, clk);
    }

    fn record_advice_pop_stack(&mut self, value: Felt) {
        self.advice.record_pop_stack(value);
    }

    fn record_advice_pop_stack_word(&mut self, word: Word) {
        self.advice.record_pop_stack_word(word);
    }

    fn record_advice_pop_stack_dword(&mut self, words: [Word; 2]) {
        self.advice.record_pop_stack_dword(words);
    }

    fn record_u32and(&mut self, a: Felt, b: Felt) {
        self.bitwise.record_u32and(a, b);
    }

    fn record_u32xor(&mut self, a: Felt, b: Felt) {
        self.bitwise.record_u32xor(a, b);
    }

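    /// Splits each u32 value into two u16 limbs and records all four limbs with the range
    /// checker.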
    fn record_u32_range_checks(&mut self, clk: RowIndex, u32_lo: Felt, u32_hi: Felt) {
        let (t1, t0) = split_u32_into_u16(u32_lo.as_int());
        let (t3, t2) = split_u32_into_u16(u32_hi.as_int());

        self.range_checker.record_range_check_u32(clk, [t0, t1, t2, t3]);
    }

    fn record_kernel_proc_access(&mut self, proc_hash: Word) {
        self.kernel.record_kernel_proc_access(proc_hash);
    }

    fn record_circuit_evaluation(&mut self, clk: RowIndex, circuit_eval: CircuitEvaluation) {
        self.ace.record_circuit_evaluation(clk, circuit_eval);
    }

    fn increment_clk(&mut self) {
        self.overflow_table.advance_clock();
    }

    fn increment_stack_size(&mut self, processor: &FastProcessor) {
        let new_overflow_value = processor.stack_get(15);
        self.overflow_table.push(new_overflow_value);
    }

    fn decrement_stack_size(&mut self) {
        if let Some(popped_value) = self.overflow_table.pop() {
            let new_overflow_addr = self.overflow_table.last_update_clk_in_current_ctx();
            self.overflow_replay.record_pop_overflow(popped_value, new_overflow_addr);
        }
    }

    fn start_context(&mut self) {
        self.overflow_table.start_context();
    }

    fn restore_context(&mut self) {
        self.overflow_table.restore_context();
        self.overflow_replay.record_restore_context_overflow_addr(
            MIN_STACK_DEPTH + self.overflow_table.num_elements_in_current_ctx(),
            self.overflow_table.last_update_clk_in_current_ctx(),
        );
    }
}

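/// The number of rows that a single permutation takes up in the hasher chiplet's trace.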
const NUM_HASHER_ROWS_PER_PERMUTATION: u32 = 8;

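/// Mirrors the address bookkeeping of the hasher chiplet during fast execution.
///
/// It tracks the row address at which each hasher operation starts and records the responses
/// (block addresses, permutation outputs, computed roots) that will be replayed during core
/// trace generation.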
#[derive(Debug)]
pub struct HasherChipletShim {
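    /// The row address at which the next hasher operation will start (the first hasher row has
    /// address 1).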
    addr: u32,
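    /// The hasher responses recorded so far.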
    replay: HasherResponseReplay,
}

impl HasherChipletShim {
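    /// Creates a new `HasherChipletShim` with the address set to the first hasher row.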
    pub fn new() -> Self {
        Self {
            addr: 1,
            replay: HasherResponseReplay::default(),
        }
    }

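    /// Records the block address assigned to a control block and advances the address by one
    /// permutation, since hashing a control block requires a single permutation.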
    pub fn record_hash_control_block(&mut self) -> Felt {
        let block_addr = self.addr.into();

        self.replay.record_block_address(block_addr);
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION;

        block_addr
    }

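    /// Records the block address assigned to a basic block and advances the address by one
    /// permutation per operation batch.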
    pub fn record_hash_basic_block(&mut self, basic_block_node: &BasicBlockNode) -> Felt {
        let block_addr = self.addr.into();

        self.replay.record_block_address(block_addr);
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION * basic_block_node.num_op_batches() as u32;

        block_addr
    }
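
    /// Records the output of a single hasher permutation and advances the address past it.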
    pub fn record_permute_output(&mut self, hashed_state: [Felt; STATE_WIDTH]) {
        self.replay.record_permute(self.addr.into(), hashed_state);
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION;
    }

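    /// Records the root computed when verifying a Merkle path and advances the address by one
    /// permutation per level of the path.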
    pub fn record_build_merkle_root(&mut self, path: &MerklePath, computed_root: Word) {
        self.replay.record_build_merkle_root(self.addr.into(), computed_root);
        self.addr += NUM_HASHER_ROWS_PER_PERMUTATION * path.depth() as u32;
    }

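    /// Records the old and new roots computed by a Merkle root update and advances the address
    /// past the rows used by the operation.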
    pub fn record_update_merkle_root(&mut self, path: &MerklePath, old_root: Word, new_root: Word) {
        self.replay.record_update_merkle_root(self.addr.into(), old_root, new_root);

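        // The operation verifies two Merkle paths (one against the old root and one against the
        // new root), so twice the usual number of rows is consumed per level of the path.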
        self.addr += 2 * NUM_HASHER_ROWS_PER_PERMUTATION * path.depth() as u32;
    }

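    /// Takes the recorded hasher responses, leaving an empty replay in their place.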
    pub fn extract_replay(&mut self) -> HasherResponseReplay {
        core::mem::take(&mut self.replay)
    }
}

impl Default for HasherChipletShim {
    fn default() -> Self {
        Self::new()
    }
}