1use std::{
2 ops::{Deref, DerefMut},
3 sync::Arc,
4};
5
6use hashbrown::HashMap;
7use sp1_hypercube::air::{PublicValues, PROOF_NONCE_NUM_WORDS};
8use sp1_jit::MinimalTrace;
9
10use crate::{
11 events::{
12 AluEvent, BranchEvent, IntoMemoryRecord, JumpEvent, MemInstrEvent, MemoryLocalEvent,
13 MemoryReadRecord, MemoryRecord, MemoryRecordEnum, MemoryWriteRecord, PrecompileEvent,
14 SyscallEvent, UTypeEvent,
15 },
16 vm::{
17 results::{
18 AluResult, BranchResult, CycleResult, EcallResult, JumpResult, LoadResult,
19 MaybeImmediate, StoreResult, UTypeResult,
20 },
21 syscall::SyscallRuntime,
22 CoreVM,
23 },
24 ALUTypeRecord, ExecutionError, ExecutionRecord, ITypeRecord, Instruction, JTypeRecord,
25 MemoryAccessRecord, Opcode, Program, RTypeRecord, Register, SP1CoreOpts, SyscallCode,
26};
27
/// A VM that re-executes a minimal trace while recording execution events
/// (ALU, memory, branch, jump, syscall, ...) into an [`ExecutionRecord`].
pub struct TracingVM<'a> {
    /// The underlying core VM that performs the actual instruction execution.
    pub core: CoreVM<'a>,
    /// Accumulates the first/last memory access per address for the CPU side.
    pub local_memory_access: LocalMemoryAccess,
    /// Set while a sent (non-retained) precompile syscall executes; collects
    /// the precompile's local memory accesses separately from the CPU's.
    pub precompile_local_memory_access: Option<LocalMemoryAccess>,
    /// The execution record that all emitted events are pushed into.
    pub record: &'a mut ExecutionRecord,
}
39
impl TracingVM<'_> {
    /// Runs the VM until the program finishes or the minimal trace is
    /// exhausted.
    ///
    /// Returns `CycleResult::Done(true)` when the program completes and
    /// `CycleResult::ShardBoundary` when the trace ends first; both paths
    /// finalize the execution record via `postprocess`.
    pub fn execute(&mut self) -> Result<CycleResult, ExecutionError> {
        // Already finished: nothing to execute or record.
        if self.core.is_done() {
            return Ok(CycleResult::Done(true));
        }

        loop {
            match self.execute_instruction()? {
                // Still running: execute the next instruction.
                CycleResult::Done(false) => {}
                CycleResult::TraceEnd => {
                    // The minimal trace ran out mid-program: refresh the
                    // register records, finalize, and report a shard boundary.
                    self.register_refresh();
                    self.postprocess();
                    return Ok(CycleResult::ShardBoundary);
                }
                CycleResult::Done(true) => {
                    self.postprocess();
                    return Ok(CycleResult::Done(true));
                }
                CycleResult::ShardBoundary => {
                    unreachable!("Shard boundary should never be returned for tracing VM")
                }
            }
        }
    }

    /// Fetches and executes a single instruction, dispatching on opcode class
    /// to the matching `execute_*` handler, then advances the core VM.
    pub fn execute_instruction(&mut self) -> Result<CycleResult, ExecutionError> {
        let instruction = self.core.fetch();
        if instruction.is_none() {
            unreachable!("Fetching the next instruction failed");
        }

        // SAFETY: `instruction` was checked to be `Some` just above.
        let instruction = unsafe { *instruction.unwrap_unchecked() };

        match &instruction.opcode {
            // Arithmetic / logic instructions (register or immediate operands).
            Opcode::ADD
            | Opcode::ADDI
            | Opcode::SUB
            | Opcode::XOR
            | Opcode::OR
            | Opcode::AND
            | Opcode::SLL
            | Opcode::SLLW
            | Opcode::SRL
            | Opcode::SRA
            | Opcode::SRLW
            | Opcode::SRAW
            | Opcode::SLT
            | Opcode::SLTU
            | Opcode::MUL
            | Opcode::MULHU
            | Opcode::MULHSU
            | Opcode::MULH
            | Opcode::MULW
            | Opcode::DIVU
            | Opcode::REMU
            | Opcode::DIV
            | Opcode::REM
            | Opcode::DIVW
            | Opcode::ADDW
            | Opcode::SUBW
            | Opcode::DIVUW
            | Opcode::REMUW
            | Opcode::REMW => {
                self.execute_alu(&instruction);
            }
            // Memory loads.
            Opcode::LB
            | Opcode::LBU
            | Opcode::LH
            | Opcode::LHU
            | Opcode::LW
            | Opcode::LWU
            | Opcode::LD => self.execute_load(&instruction)?,
            // Memory stores.
            Opcode::SB | Opcode::SH | Opcode::SW | Opcode::SD => {
                self.execute_store(&instruction)?;
            }
            Opcode::JAL | Opcode::JALR => {
                self.execute_jump(&instruction);
            }
            Opcode::BEQ | Opcode::BNE | Opcode::BLT | Opcode::BGE | Opcode::BLTU | Opcode::BGEU => {
                self.execute_branch(&instruction);
            }
            Opcode::LUI | Opcode::AUIPC => {
                self.execute_utype(&instruction);
            }
            Opcode::ECALL => self.execute_ecall(&instruction)?,
            Opcode::EBREAK | Opcode::UNIMP => {
                unreachable!("Invalid opcode for `execute_instruction`: {:?}", instruction.opcode)
            }
        }

        Ok(self.core.advance())
    }

    /// Finalizes the execution record at the end of a shard or of the program:
    /// stamps timestamps, copies the program and public values, and drains the
    /// CPU-local memory accesses into the record.
    fn postprocess(&mut self) {
        // If nothing set the last timestamp, use the current clock.
        if self.record.last_timestamp == 0 {
            self.record.last_timestamp = self.core.clk();
        }

        self.record.program = self.core.program.clone();
        if self.record.contains_cpu() {
            // `pc_start` is set by `emit_events` on the first executed
            // instruction, so it is `Some` whenever CPU events exist.
            self.record.public_values.pc_start = self.record.pc_start.unwrap();
            self.record.public_values.next_pc = self.record.next_pc;
            self.record.public_values.exit_code = self.record.exit_code;
            self.record.public_values.last_timestamp = self.record.last_timestamp;
            self.record.public_values.initial_timestamp = self.record.initial_timestamp;
        }

        // Move every accumulated per-address local event into the record.
        for (_, event) in self.local_memory_access.inner.drain() {
            self.record.cpu_local_memory_access.push(event);
        }
    }

    /// Records a fresh memory record for each register at the shard boundary,
    /// both in the local access map and as register bump events.
    fn register_refresh(&mut self) {
        for (addr, record) in self.core.register_refresh().into_iter().enumerate() {
            // The register index doubles as the address in the local map.
            self.local_memory_access.insert_record(addr as u64, record);

            self.record.bump_memory_events.push((
                MemoryRecordEnum::Read(record),
                addr as u64,
                true,
            ));
        }
    }

    /// Returns the core VM's register file.
    #[must_use]
    pub fn registers(&self) -> &[MemoryRecord; 32] {
        self.core.registers()
    }

    /// Returns mutable access to the core VM's register file.
    #[must_use]
    pub fn registers_mut(&mut self) -> &mut [MemoryRecord; 32] {
        self.core.registers_mut()
    }
}
180
impl<'a> TracingVM<'a> {
    /// Creates a tracing VM over a minimal trace and a program, recording into
    /// `record`. Also stamps the record's initial timestamp from the trace's
    /// starting clock.
    pub fn new<T: MinimalTrace>(
        trace: &'a T,
        program: Arc<Program>,
        opts: SP1CoreOpts,
        proof_nonce: [u32; PROOF_NONCE_NUM_WORDS],
        record: &'a mut ExecutionRecord,
    ) -> Self {
        record.initial_timestamp = trace.clk_start();

        Self {
            core: CoreVM::new(trace, program, opts, proof_nonce),
            record,
            local_memory_access: LocalMemoryAccess::default(),
            precompile_local_memory_access: None,
        }
    }

    /// Returns the public values accumulated in the record so far.
    #[must_use]
    pub fn public_values(&self) -> &PublicValues<u32, u64, u64, u32> {
        &self.record.public_values
    }

    /// Executes a load instruction, logging the register read/write and the
    /// memory read, then emits the corresponding events.
    pub fn execute_load(&mut self, instruction: &Instruction) -> Result<(), ExecutionError> {
        let LoadResult { mut a, b, c, rs1, rd, addr, rr_record, rw_record, mr_record } =
            self.core.execute_load(instruction)?;

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Write(rw_record)),
            b: Some(MemoryRecordEnum::Read(rr_record)),
            c: None,
            memory: Some(MemoryRecordEnum::Read(mr_record)),
            untrusted_instruction: None,
        };

        // Writes targeting x0 are discarded, so the recorded value is 0.
        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        self.local_memory_access.insert_record(rd as u64, rw_record);
        self.local_memory_access.insert_record(rs1 as u64, rr_record);
        // Memory accesses are tracked at 8-byte granularity: clear the low 3
        // address bits to align down to the containing double word.
        self.local_memory_access.insert_record(addr & !0b111, mr_record);

        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);
        self.emit_mem_instr_event(instruction, a, b, c, &mem_access_record, op_a_0);

        Ok(())
    }

    /// Executes a store instruction, logging the two register reads and the
    /// memory write, then emits the corresponding events.
    fn execute_store(&mut self, instruction: &Instruction) -> Result<(), ExecutionError> {
        let StoreResult { mut a, b, c, rs1, rs2, addr, rs1_record, rs2_record, mw_record } =
            self.core.execute_store(instruction)?;

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Read(rs1_record)),
            b: Some(MemoryRecordEnum::Read(rs2_record)),
            c: None,
            memory: Some(MemoryRecordEnum::Write(mw_record)),
            untrusted_instruction: None,
        };

        // Writes targeting x0 are discarded, so the recorded value is 0.
        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        // Align down to the containing 8-byte word (see `execute_load`).
        self.local_memory_access.insert_record(addr & !0b111, mw_record);
        self.local_memory_access.insert_record(rs1 as u64, rs1_record);
        self.local_memory_access.insert_record(rs2 as u64, rs2_record);

        // NOTE(review): the two emit calls are in the opposite order from
        // `execute_load`; they push to different record queues so this looks
        // immaterial, but confirm it is intentional.
        self.emit_mem_instr_event(instruction, a, b, c, &mem_access_record, op_a_0);
        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);

        Ok(())
    }

    /// Executes an ALU instruction; the `b`/`c` operands may be registers or
    /// immediates, so their records are optional.
    fn execute_alu(&mut self, instruction: &Instruction) {
        let AluResult { rd, rw_record, mut a, b, c, rs1, rs2 } = self.core.execute_alu(instruction);

        // Only register operands produce memory records; immediates do not.
        if let MaybeImmediate::Register(rs2, rs2_record) = rs2 {
            self.local_memory_access.insert_record(rs2 as u64, rs2_record);
        }

        if let MaybeImmediate::Register(rs1, rs1_record) = rs1 {
            self.local_memory_access.insert_record(rs1 as u64, rs1_record);
        }

        self.local_memory_access.insert_record(rd as u64, rw_record);

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Write(rw_record)),
            b: rs1.record().map(|r| MemoryRecordEnum::Read(*r)),
            c: rs2.record().map(|r| MemoryRecordEnum::Read(*r)),
            memory: None,
            untrusted_instruction: None,
        };

        // Writes targeting x0 are discarded, so the recorded value is 0.
        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);
        self.emit_alu_event(instruction, a, b, c, &mem_access_record, op_a_0);
    }

    /// Executes a JAL/JALR instruction; `rs1` is only a register for JALR.
    fn execute_jump(&mut self, instruction: &Instruction) {
        let JumpResult { mut a, b, c, rd, rd_record, rs1 } = self.core.execute_jump(instruction);

        if let MaybeImmediate::Register(rs1, rs1_record) = rs1 {
            self.local_memory_access.insert_record(rs1 as u64, rs1_record);
        }

        self.local_memory_access.insert_record(rd as u64, rd_record);

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Write(rd_record)),
            b: rs1.record().map(|r| MemoryRecordEnum::Read(*r)),
            c: None,
            memory: None,
            untrusted_instruction: None,
        };

        // Writes targeting x0 are discarded, so the recorded value is 0.
        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);
        // JAL and JALR are recorded in separate event queues with different
        // record layouts (J-type vs I-type).
        match instruction.opcode {
            Opcode::JAL => self.emit_jal_event(
                instruction,
                a,
                b,
                c,
                &mem_access_record,
                op_a_0,
                self.core.next_pc(),
            ),
            Opcode::JALR => self.emit_jalr_event(
                instruction,
                a,
                b,
                c,
                &mem_access_record,
                op_a_0,
                self.core.next_pc(),
            ),
            _ => unreachable!("Invalid opcode for `execute_jump`: {:?}", instruction.opcode),
        }
    }

    /// Executes a conditional branch; both operands are register reads.
    fn execute_branch(&mut self, instruction: &Instruction) {
        let BranchResult { mut a, rs1, a_record, b, rs2, b_record, c } =
            self.core.execute_branch(instruction);

        self.local_memory_access.insert_record(rs2 as u64, b_record);
        self.local_memory_access.insert_record(rs1 as u64, a_record);

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Read(a_record)),
            b: Some(MemoryRecordEnum::Read(b_record)),
            c: None,
            memory: None,
            untrusted_instruction: None,
        };

        // When op_a is x0 the recorded value is forced to 0.
        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);
        self.emit_branch_event(
            instruction,
            a,
            b,
            c,
            &mem_access_record,
            op_a_0,
            self.core.next_pc(),
        );
    }

    /// Executes LUI/AUIPC, which only write the destination register.
    fn execute_utype(&mut self, instruction: &Instruction) {
        let UTypeResult { mut a, b, c, rd, rw_record } = self.core.execute_utype(instruction);

        self.local_memory_access.insert_record(rd as u64, rw_record);

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Write(rw_record)),
            b: None,
            c: None,
            memory: None,
            untrusted_instruction: None,
        };

        // Writes targeting x0 are discarded, so the recorded value is 0.
        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);
        self.emit_utype_event(instruction, a, b, c, &mem_access_record, op_a_0);
    }

    /// Executes an ECALL: dispatches the syscall through the core VM with this
    /// tracing runtime, recording the syscall registers (x5/x10/x11) and a
    /// syscall event.
    fn execute_ecall(&mut self, instruction: &Instruction) -> Result<(), ExecutionError> {
        let code = self.core.read_code();

        // A syscall that will be sent to a precompile gets its own local
        // memory access map for the duration of the call.
        // NOTE(review): this uses `should_send() == 1` while `syscall_event`
        // below uses `should_send() != 0` — confirm the two conditions are
        // meant to be equivalent.
        if !self.core().is_retained_syscall(code) && code.should_send() == 1 {
            self.precompile_local_memory_access = Some(LocalMemoryAccess::default());
        }

        // Passes `self` so syscall handlers record through this tracing
        // runtime (`SyscallRuntime` impl below).
        let EcallResult { a: _, a_record, b, b_record, c, c_record } =
            CoreVM::<'a>::execute_ecall(self, instruction, code)?;

        // ECALL operands live in fixed registers: x5 (code), x10/x11 (args).
        self.local_memory_access.insert_record(Register::X11 as u64, c_record);
        self.local_memory_access.insert_record(Register::X10 as u64, b_record);
        self.local_memory_access.insert_record(Register::X5 as u64, a_record);

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Write(a_record)),
            b: Some(MemoryRecordEnum::Read(b_record)),
            c: Some(MemoryRecordEnum::Read(c_record)),
            memory: None,
            untrusted_instruction: None,
        };

        let op_a_0 = instruction.op_a == 0;
        self.emit_events(
            self.core.clk(),
            self.core.next_pc(),
            instruction,
            &mem_access_record,
            self.core.exit_code(),
        );

        self.emit_syscall_event(
            self.core.clk(),
            code,
            b,
            c,
            &mem_access_record,
            op_a_0,
            self.core.next_pc(),
            self.core.exit_code(),
            instruction,
        );

        Ok(())
    }
}
458
impl TracingVM<'_> {
    /// Updates shard-level bookkeeping for one executed instruction: start pc,
    /// next pc, exit code, CPU event count, and the "bump" events required
    /// when the clock or a memory timestamp crosses a 2^24 boundary.
    #[allow(clippy::too_many_arguments)]
    fn emit_events(
        &mut self,
        clk: u64,
        next_pc: u64,
        instruction: &Instruction,
        record: &MemoryAccessRecord,
        exit_code: u32,
    ) {
        // The first instruction executed in this shard fixes `pc_start`.
        self.record.pc_start.get_or_insert(self.core.pc());
        self.record.next_pc = next_pc;
        self.record.exit_code = exit_code;
        self.record.cpu_event_count += 1;

        let increment = self.core.next_clk() - clk;

        // bump1: the low 24 bits of the clock overflow during this cycle.
        let bump1 = clk % (1 << 24) + increment >= (1 << 24);
        // bump2: a fall-through update (pc + 4) crosses a 16-bit pc boundary,
        // for instructions flagged as not tracking next_pc themselves
        // (presumably chips that don't constrain next_pc — TODO confirm).
        let bump2 = !instruction.is_with_correct_next_pc()
            && next_pc == self.core.pc().wrapping_add(4)
            && (next_pc >> 16) != (self.core.pc() >> 16);

        if bump1 || bump2 {
            self.record.bump_state_events.push((clk, increment, bump2, next_pc));
        }

        // Any operand access whose timestamp's high bits (>> 24) changed
        // relative to its previous access also needs a memory bump event.
        if let Some(x) = record.a {
            if x.current_record().timestamp >> 24 != x.previous_record().timestamp >> 24 {
                self.record.bump_memory_events.push((x, instruction.op_a as u64, false));
            }
        }
        if let Some(x) = record.b {
            if x.current_record().timestamp >> 24 != x.previous_record().timestamp >> 24 {
                self.record.bump_memory_events.push((x, instruction.op_b, false));
            }
        }
        if let Some(x) = record.c {
            if x.current_record().timestamp >> 24 != x.previous_record().timestamp >> 24 {
                self.record.bump_memory_events.push((x, instruction.op_c, false));
            }
        }
    }

    /// Builds a `MemInstrEvent` and routes it to the per-opcode event queue.
    /// Loads into x0 go to a dedicated queue regardless of width.
    #[inline]
    fn emit_mem_instr_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
    ) {
        let opcode = instruction.opcode;
        let event = MemInstrEvent {
            clk: self.core.clk(),
            pc: self.core.pc(),
            opcode,
            a,
            b,
            c,
            op_a_0,
            // SAFETY: every caller (`execute_load` / `execute_store`) fills
            // `record.memory` with `Some(...)`.
            mem_access: unsafe { record.memory.unwrap_unchecked() },
        };

        let record = ITypeRecord::new(record, instruction);
        if matches!(
            opcode,
            Opcode::LB
                | Opcode::LBU
                | Opcode::LH
                | Opcode::LHU
                | Opcode::LW
                | Opcode::LWU
                | Opcode::LD
        ) && op_a_0
        {
            self.record.memory_load_x0_events.push((event, record));
        } else if matches!(opcode, Opcode::LB | Opcode::LBU) {
            self.record.memory_load_byte_events.push((event, record));
        } else if matches!(opcode, Opcode::LH | Opcode::LHU) {
            self.record.memory_load_half_events.push((event, record));
        } else if matches!(opcode, Opcode::LW | Opcode::LWU) {
            self.record.memory_load_word_events.push((event, record));
        } else if opcode == Opcode::LD {
            self.record.memory_load_double_events.push((event, record));
        } else if opcode == Opcode::SB {
            self.record.memory_store_byte_events.push((event, record));
        } else if opcode == Opcode::SH {
            self.record.memory_store_half_events.push((event, record));
        } else if opcode == Opcode::SW {
            self.record.memory_store_word_events.push((event, record));
        } else if opcode == Opcode::SD {
            self.record.memory_store_double_events.push((event, record));
        }
    }

    /// Builds an `AluEvent` and routes it, with the record type matching the
    /// opcode's encoding (R-type, I-type, or generic ALU-type).
    fn emit_alu_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
    ) {
        let opcode = instruction.opcode;
        let event = AluEvent { clk: self.core.clk(), pc: self.core.pc(), opcode, a, b, c, op_a_0 };
        match opcode {
            Opcode::ADD => {
                let record = RTypeRecord::new(record, instruction);
                self.record.add_events.push((event, record));
            }
            Opcode::ADDW => {
                let record = ALUTypeRecord::new(record, instruction);
                self.record.addw_events.push((event, record));
            }
            Opcode::ADDI => {
                let record = ITypeRecord::new(record, instruction);
                self.record.addi_events.push((event, record));
            }
            Opcode::SUB => {
                let record = RTypeRecord::new(record, instruction);
                self.record.sub_events.push((event, record));
            }
            Opcode::SUBW => {
                let record = RTypeRecord::new(record, instruction);
                self.record.subw_events.push((event, record));
            }
            Opcode::XOR | Opcode::OR | Opcode::AND => {
                let record = ALUTypeRecord::new(record, instruction);
                self.record.bitwise_events.push((event, record));
            }
            Opcode::SLL | Opcode::SLLW => {
                let record = ALUTypeRecord::new(record, instruction);
                self.record.shift_left_events.push((event, record));
            }
            Opcode::SRL | Opcode::SRA | Opcode::SRLW | Opcode::SRAW => {
                let record = ALUTypeRecord::new(record, instruction);
                self.record.shift_right_events.push((event, record));
            }
            Opcode::SLT | Opcode::SLTU => {
                let record = ALUTypeRecord::new(record, instruction);
                self.record.lt_events.push((event, record));
            }
            Opcode::MUL | Opcode::MULHU | Opcode::MULHSU | Opcode::MULH | Opcode::MULW => {
                let record = RTypeRecord::new(record, instruction);
                self.record.mul_events.push((event, record));
            }
            Opcode::DIVU
            | Opcode::REMU
            | Opcode::DIV
            | Opcode::REM
            | Opcode::DIVW
            | Opcode::DIVUW
            | Opcode::REMUW
            | Opcode::REMW => {
                let record = RTypeRecord::new(record, instruction);
                self.record.divrem_events.push((event, record));
            }
            _ => unreachable!(),
        }
    }

    /// Records a JAL jump event with its J-type record.
    #[inline]
    #[allow(clippy::too_many_arguments)]
    fn emit_jal_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
        next_pc: u64,
    ) {
        let event = JumpEvent {
            clk: self.core.clk(),
            pc: self.core.pc(),
            next_pc,
            opcode: instruction.opcode,
            a,
            b,
            c,
            op_a_0,
        };
        let record = JTypeRecord::new(record, instruction);
        self.record.jal_events.push((event, record));
    }

    /// Records a JALR jump event with its I-type record.
    #[inline]
    #[allow(clippy::too_many_arguments)]
    fn emit_jalr_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
        next_pc: u64,
    ) {
        let event = JumpEvent {
            clk: self.core.clk(),
            pc: self.core.pc(),
            next_pc,
            opcode: instruction.opcode,
            a,
            b,
            c,
            op_a_0,
        };
        let record = ITypeRecord::new(record, instruction);
        self.record.jalr_events.push((event, record));
    }

    /// Records a conditional-branch event with its I-type record.
    #[inline]
    #[allow(clippy::too_many_arguments)]
    fn emit_branch_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
        next_pc: u64,
    ) {
        let event = BranchEvent {
            clk: self.core.clk(),
            pc: self.core.pc(),
            next_pc,
            opcode: instruction.opcode,
            a,
            b,
            c,
            op_a_0,
        };
        let record = ITypeRecord::new(record, instruction);
        self.record.branch_events.push((event, record));
    }

    /// Records a LUI/AUIPC event.
    // NOTE(review): U-type events use `JTypeRecord` (like JAL) — presumably
    // because both encode a wide immediate; confirm intended.
    #[inline]
    fn emit_utype_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
    ) {
        let event = UTypeEvent {
            clk: self.core.clk(),
            pc: self.core.pc(),
            opcode: instruction.opcode,
            a,
            b,
            c,
            op_a_0,
        };
        let record = JTypeRecord::new(record, instruction);
        self.record.utype_events.push((event, record));
    }

    /// Builds a syscall event (via `SyscallRuntime::syscall_event`) and pushes
    /// it with its R-type record.
    #[allow(clippy::too_many_arguments)]
    fn emit_syscall_event(
        &mut self,
        clk: u64,
        syscall_code: SyscallCode,
        arg1: u64,
        arg2: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
        next_pc: u64,
        exit_code: u32,
        instruction: &Instruction,
    ) {
        let syscall_event =
            self.syscall_event(clk, syscall_code, arg1, arg2, op_a_0, next_pc, exit_code);

        let record = RTypeRecord::new(record, instruction);
        self.record.syscall_events.push((syscall_event, record));
    }
}
755
756impl<'a> SyscallRuntime<'a> for TracingVM<'a> {
757 const TRACING: bool = true;
758
759 fn core(&self) -> &CoreVM<'a> {
760 &self.core
761 }
762
763 fn core_mut(&mut self) -> &mut CoreVM<'a> {
764 &mut self.core
765 }
766
767 #[inline]
769 fn syscall_event(
770 &self,
771 clk: u64,
772 syscall_code: SyscallCode,
773 arg1: u64,
774 arg2: u64,
775 op_a_0: bool,
776 next_pc: u64,
777 exit_code: u32,
778 ) -> SyscallEvent {
779 let should_send =
781 syscall_code.should_send() != 0 && !self.core.is_retained_syscall(syscall_code);
782
783 SyscallEvent {
784 pc: self.core.pc(),
785 next_pc,
786 clk,
787 op_a_0,
788 should_send,
789 syscall_code,
790 syscall_id: syscall_code.syscall_id(),
791 arg1,
792 arg2,
793 exit_code,
794 }
795 }
796
797 fn add_precompile_event(
798 &mut self,
799 syscall_code: SyscallCode,
800 syscall_event: SyscallEvent,
801 event: PrecompileEvent,
802 ) {
803 self.record.precompile_events.add_event(syscall_code, syscall_event, event);
804 }
805
806 fn record_mut(&mut self) -> &mut ExecutionRecord {
807 self.record
808 }
809
810 fn rr(&mut self, register: usize) -> MemoryReadRecord {
811 let record = SyscallRuntime::rr(self.core_mut(), register);
812
813 if let Some(local_memory_access) = &mut self.precompile_local_memory_access {
814 local_memory_access.insert_record(register as u64, record);
815 } else {
816 self.local_memory_access.insert_record(register as u64, record);
817 }
818
819 record
820 }
821
822 fn mr(&mut self, addr: u64) -> MemoryReadRecord {
823 let record = SyscallRuntime::mr(self.core_mut(), addr);
824
825 if let Some(local_memory_access) = &mut self.precompile_local_memory_access {
826 local_memory_access.insert_record(addr, record);
827 } else {
828 self.local_memory_access.insert_record(addr, record);
829 }
830
831 record
832 }
833
834 fn mr_slice(&mut self, addr: u64, len: usize) -> Vec<MemoryReadRecord> {
835 let records = SyscallRuntime::mr_slice(self.core_mut(), addr, len);
836
837 for (i, record) in records.iter().enumerate() {
838 if let Some(local_memory_access) = &mut self.precompile_local_memory_access {
839 local_memory_access.insert_record(addr + i as u64 * 8, *record);
840 } else {
841 self.local_memory_access.insert_record(addr + i as u64 * 8, *record);
842 }
843 }
844
845 records
846 }
847
848 fn mw(&mut self, addr: u64) -> MemoryWriteRecord {
849 let record = SyscallRuntime::mw(self.core_mut(), addr);
850
851 if let Some(local_memory_access) = &mut self.precompile_local_memory_access {
852 local_memory_access.insert_record(addr, record);
853 } else {
854 self.local_memory_access.insert_record(addr, record);
855 }
856
857 record
858 }
859
860 fn mw_slice(&mut self, addr: u64, len: usize) -> Vec<MemoryWriteRecord> {
861 let records = SyscallRuntime::mw_slice(self.core_mut(), addr, len);
862
863 for (i, record) in records.iter().enumerate() {
864 if let Some(local_memory_access) = &mut self.precompile_local_memory_access {
865 local_memory_access.insert_record(addr + i as u64 * 8, *record);
866 } else {
867 self.local_memory_access.insert_record(addr + i as u64 * 8, *record);
868 }
869 }
870
871 records
872 }
873
874 fn postprocess_precompile(&mut self) -> Vec<MemoryLocalEvent> {
875 let mut precompile_local_memory_access = Vec::new();
876
877 if let Some(mut local_memory_access) =
878 std::mem::take(&mut self.precompile_local_memory_access)
879 {
880 for (addr, event) in local_memory_access.drain() {
881 if let Some(cpu_mem_access) = self.local_memory_access.remove(&addr) {
882 self.record.cpu_local_memory_access.push(cpu_mem_access);
883 }
884
885 precompile_local_memory_access.push(event);
886 }
887 }
888
889 precompile_local_memory_access
890 }
891}
892
/// Map from memory address to the accumulated local memory event (earliest
/// and latest access) observed at that address.
#[derive(Debug, Default)]
pub struct LocalMemoryAccess {
    /// One `MemoryLocalEvent` per touched address, keyed by address.
    pub inner: HashMap<u64, MemoryLocalEvent>,
}
897
898impl LocalMemoryAccess {
899 #[inline]
900 #[allow(clippy::needless_pass_by_value)]
901 pub(crate) fn insert_record(&mut self, addr: u64, event: impl IntoMemoryRecord) {
902 self.inner
903 .entry(addr)
904 .and_modify(|e| {
905 let current_record = event.current_record();
906 let previous_record = event.previous_record();
907
908 if current_record.timestamp > e.final_mem_access.timestamp {
910 e.final_mem_access = current_record;
911 }
912
913 if previous_record.timestamp < e.initial_mem_access.timestamp {
915 e.initial_mem_access = previous_record;
916 }
917 })
918 .or_insert_with(|| MemoryLocalEvent {
919 addr,
920 initial_mem_access: event.previous_record(),
921 final_mem_access: event.current_record(),
922 });
923 }
924}
925
926impl Deref for LocalMemoryAccess {
927 type Target = HashMap<u64, MemoryLocalEvent>;
928 fn deref(&self) -> &Self::Target {
929 &self.inner
930 }
931}
932
933impl DerefMut for LocalMemoryAccess {
934 fn deref_mut(&mut self) -> &mut Self::Target {
935 &mut self.inner
936 }
937}