1use std::{
2 ops::{Deref, DerefMut},
3 sync::Arc,
4};
5
6use hashbrown::HashMap;
7use sp1_hypercube::air::{PublicValues, PROOF_NONCE_NUM_WORDS};
8use sp1_jit::MinimalTrace;
9
10use crate::{
11 events::{
12 AluEvent, BranchEvent, IntoMemoryRecord, JumpEvent, MemInstrEvent, MemoryLocalEvent,
13 MemoryReadRecord, MemoryRecord, MemoryRecordEnum, MemoryWriteRecord, PrecompileEvent,
14 SyscallEvent, UTypeEvent,
15 },
16 vm::{
17 results::{
18 AluResult, BranchResult, CycleResult, EcallResult, JumpResult, LoadResult,
19 MaybeImmediate, StoreResult, UTypeResult,
20 },
21 syscall::SyscallRuntime,
22 CoreVM,
23 },
24 ALUTypeRecord, ExecutionError, ExecutionRecord, ITypeRecord, Instruction, JTypeRecord,
25 MemoryAccessRecord, Opcode, Program, RTypeRecord, Register, SP1CoreOpts, SyscallCode,
26};
27
/// A VM that re-executes a pre-recorded minimal trace while emitting the events
/// needed to prove the execution.
pub struct TracingVM<'a> {
    /// The underlying core VM that performs the actual instruction execution.
    pub core: CoreVM<'a>,
    /// Per-address first/last memory accesses attributed to the CPU for the current shard.
    pub local_memory_access: LocalMemoryAccess,
    /// Set for the duration of a non-retained, sendable syscall so that the precompile's
    /// memory accesses are collected separately from the CPU's; `None` otherwise.
    pub precompile_local_memory_access: Option<LocalMemoryAccess>,
    /// The execution record that all emitted events and public values are written into.
    pub record: &'a mut ExecutionRecord,
}
39
impl TracingVM<'_> {
    /// Runs instructions until the program halts or the recorded trace is exhausted.
    ///
    /// Returns `CycleResult::Done(true)` when the program finishes, or
    /// `CycleResult::ShardBoundary` when the trace ends (after refreshing register
    /// records and postprocessing the execution record).
    pub fn execute(&mut self) -> Result<CycleResult, ExecutionError> {
        if self.core.is_done() {
            return Ok(CycleResult::Done(true));
        }

        loop {
            match self.execute_instruction()? {
                // Not done yet: keep executing.
                CycleResult::Done(false) => {}
                CycleResult::TraceEnd => {
                    self.register_refresh();
                    self.postprocess();
                    return Ok(CycleResult::ShardBoundary);
                }
                CycleResult::Done(true) => {
                    self.postprocess();
                    return Ok(CycleResult::Done(true));
                }
                CycleResult::ShardBoundary => {
                    unreachable!("Shard boundary should never be returned for tracing VM")
                }
            }
        }
    }

    /// Fetches and executes a single instruction, dispatching to the per-opcode-class
    /// handler that records memory accesses and emits the corresponding events.
    pub fn execute_instruction(&mut self) -> Result<CycleResult, ExecutionError> {
        let instruction = self.core.fetch();
        if instruction.is_none() {
            unreachable!("Fetching the next instruction failed");
        }

        // SAFETY: `instruction` was checked to be `Some` immediately above.
        let instruction = unsafe { *instruction.unwrap_unchecked() };

        match &instruction.opcode {
            Opcode::ADD
            | Opcode::ADDI
            | Opcode::SUB
            | Opcode::XOR
            | Opcode::OR
            | Opcode::AND
            | Opcode::SLL
            | Opcode::SLLW
            | Opcode::SRL
            | Opcode::SRA
            | Opcode::SRLW
            | Opcode::SRAW
            | Opcode::SLT
            | Opcode::SLTU
            | Opcode::MUL
            | Opcode::MULHU
            | Opcode::MULHSU
            | Opcode::MULH
            | Opcode::MULW
            | Opcode::DIVU
            | Opcode::REMU
            | Opcode::DIV
            | Opcode::REM
            | Opcode::DIVW
            | Opcode::ADDW
            | Opcode::SUBW
            | Opcode::DIVUW
            | Opcode::REMUW
            | Opcode::REMW => {
                self.execute_alu(&instruction);
            }
            Opcode::LB
            | Opcode::LBU
            | Opcode::LH
            | Opcode::LHU
            | Opcode::LW
            | Opcode::LWU
            | Opcode::LD => self.execute_load(&instruction)?,
            Opcode::SB | Opcode::SH | Opcode::SW | Opcode::SD => {
                self.execute_store(&instruction)?;
            }
            Opcode::JAL | Opcode::JALR => {
                self.execute_jump(&instruction);
            }
            Opcode::BEQ | Opcode::BNE | Opcode::BLT | Opcode::BGE | Opcode::BLTU | Opcode::BGEU => {
                self.execute_branch(&instruction);
            }
            Opcode::LUI | Opcode::AUIPC => {
                self.execute_utype(&instruction);
            }
            Opcode::ECALL => self.execute_ecall(&instruction)?,
            Opcode::EBREAK | Opcode::UNIMP => {
                unreachable!("Invalid opcode for `execute_instruction`: {:?}", instruction.opcode)
            }
        }

        Ok(self.core.advance())
    }

    /// Finalizes the execution record at a shard boundary or program end: fills in the
    /// public values and drains the CPU-local memory accesses into the record.
    fn postprocess(&mut self) {
        if self.record.last_timestamp == 0 {
            self.record.last_timestamp = self.core.clk();
        }

        self.record.program = self.core.program.clone();
        if self.record.contains_cpu() {
            // `pc_start` is populated by `emit_events` whenever a CPU event exists,
            // so this unwrap is guarded by `contains_cpu()`.
            self.record.public_values.pc_start = self.record.pc_start.unwrap();
            self.record.public_values.next_pc = self.record.next_pc;
            self.record.public_values.exit_code = self.record.exit_code;
            self.record.public_values.last_timestamp = self.record.last_timestamp;
            self.record.public_values.initial_timestamp = self.record.initial_timestamp;
        }

        // Move all accumulated per-address local memory events into the record.
        for (_, event) in self.local_memory_access.inner.drain() {
            self.record.cpu_local_memory_access.push(event);
        }
    }

    /// Re-reads every register at a trace boundary, recording each access both as a
    /// local memory event and as a bump memory event (flagged with `true`).
    fn register_refresh(&mut self) {
        for (addr, record) in self.core.register_refresh().into_iter().enumerate() {
            self.local_memory_access.insert_record(addr as u64, record);

            self.record.bump_memory_events.push((
                MemoryRecordEnum::Read(record),
                addr as u64,
                true,
            ));
        }
    }

    /// Returns the current register-file records.
    #[must_use]
    pub fn registers(&self) -> &[MemoryRecord; 32] {
        self.core.registers()
    }

    /// Returns the current register-file records, mutably.
    #[must_use]
    pub fn registers_mut(&mut self) -> &mut [MemoryRecord; 32] {
        self.core.registers_mut()
    }
}
180
impl<'a> TracingVM<'a> {
    /// Creates a tracing VM over a pre-recorded minimal trace.
    ///
    /// Seeds `record.initial_timestamp` from the trace's starting clock.
    pub fn new<T: MinimalTrace>(
        trace: &'a T,
        program: Arc<Program>,
        opts: SP1CoreOpts,
        proof_nonce: [u32; PROOF_NONCE_NUM_WORDS],
        record: &'a mut ExecutionRecord,
    ) -> Self {
        record.initial_timestamp = trace.clk_start();

        Self {
            core: CoreVM::new(trace, program, opts, proof_nonce),
            record,
            local_memory_access: LocalMemoryAccess::default(),
            precompile_local_memory_access: None,
        }
    }

    /// Returns the public values accumulated in the execution record.
    #[must_use]
    pub fn public_values(&self) -> &PublicValues<u32, u64, u64, u32> {
        &self.record.public_values
    }

    /// Executes a load instruction, recording its register and memory accesses and
    /// emitting the bump and memory-instruction events.
    pub fn execute_load(&mut self, instruction: &Instruction) -> Result<(), ExecutionError> {
        let LoadResult { mut a, b, c, rs1, rd, addr, rr_record, rw_record, mr_record } =
            self.core.execute_load(instruction)?;

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Write(rw_record)),
            b: Some(MemoryRecordEnum::Read(rr_record)),
            c: None,
            memory: Some(MemoryRecordEnum::Read(mr_record)),
            untrusted_instruction: None,
        };

        // Writes to x0 are discarded: the architectural value stays zero.
        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        self.local_memory_access.insert_record(rd as u64, rw_record);
        self.local_memory_access.insert_record(rs1 as u64, rr_record);
        // Memory is tracked at 8-byte granularity, so align the address down.
        self.local_memory_access.insert_record(addr & !0b111, mr_record);

        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);
        self.emit_mem_instr_event(instruction, a, b, c, &mem_access_record, op_a_0);

        Ok(())
    }

    /// Executes a store instruction, recording its register and memory accesses and
    /// emitting the bump and memory-instruction events.
    fn execute_store(&mut self, instruction: &Instruction) -> Result<(), ExecutionError> {
        let StoreResult { mut a, b, c, rs1, rs2, addr, rs1_record, rs2_record, mw_record } =
            self.core.execute_store(instruction)?;

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Read(rs1_record)),
            b: Some(MemoryRecordEnum::Read(rs2_record)),
            c: None,
            memory: Some(MemoryRecordEnum::Write(mw_record)),
            untrusted_instruction: None,
        };

        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        // 8-byte-aligned memory address, then the two source registers.
        self.local_memory_access.insert_record(addr & !0b111, mw_record);
        self.local_memory_access.insert_record(rs1 as u64, rs1_record);
        self.local_memory_access.insert_record(rs2 as u64, rs2_record);

        // NOTE(review): emitted in the opposite order to `execute_load`; the two calls
        // push to disjoint event lists, so the ordering appears immaterial.
        self.emit_mem_instr_event(instruction, a, b, c, &mem_access_record, op_a_0);
        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);

        Ok(())
    }

    /// Executes an ALU instruction; operands may be registers or immediates, and only
    /// register operands produce local memory records.
    fn execute_alu(&mut self, instruction: &Instruction) {
        let AluResult { rd, rw_record, mut a, b, c, rs1, rs2 } = self.core.execute_alu(instruction);

        if let MaybeImmediate::Register(rs2, rs2_record) = rs2 {
            self.local_memory_access.insert_record(rs2 as u64, rs2_record);
        }

        if let MaybeImmediate::Register(rs1, rs1_record) = rs1 {
            self.local_memory_access.insert_record(rs1 as u64, rs1_record);
        }

        self.local_memory_access.insert_record(rd as u64, rw_record);

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Write(rw_record)),
            b: rs1.record().map(|r| MemoryRecordEnum::Read(*r)),
            c: rs2.record().map(|r| MemoryRecordEnum::Read(*r)),
            memory: None,
            untrusted_instruction: None,
        };

        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);
        self.emit_alu_event(instruction, a, b, c, &mem_access_record, op_a_0);
    }

    /// Executes a jump (JAL/JALR) and emits the matching jump event.
    fn execute_jump(&mut self, instruction: &Instruction) {
        let JumpResult { mut a, b, c, rd, rd_record, rs1 } = self.core.execute_jump(instruction);

        // JALR reads a base register; JAL has no register source.
        if let MaybeImmediate::Register(rs1, rs1_record) = rs1 {
            self.local_memory_access.insert_record(rs1 as u64, rs1_record);
        }

        self.local_memory_access.insert_record(rd as u64, rd_record);

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Write(rd_record)),
            b: rs1.record().map(|r| MemoryRecordEnum::Read(*r)),
            c: None,
            memory: None,
            untrusted_instruction: None,
        };

        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);
        match instruction.opcode {
            Opcode::JAL => self.emit_jal_event(
                instruction,
                a,
                b,
                c,
                &mem_access_record,
                op_a_0,
                self.core.next_pc(),
            ),
            Opcode::JALR => self.emit_jalr_event(
                instruction,
                a,
                b,
                c,
                &mem_access_record,
                op_a_0,
                self.core.next_pc(),
            ),
            _ => unreachable!("Invalid opcode for `execute_jump`: {:?}", instruction.opcode),
        }
    }

    /// Executes a branch instruction; both operands are register reads.
    fn execute_branch(&mut self, instruction: &Instruction) {
        let BranchResult { mut a, rs1, a_record, b, rs2, b_record, c } =
            self.core.execute_branch(instruction);

        self.local_memory_access.insert_record(rs2 as u64, b_record);
        self.local_memory_access.insert_record(rs1 as u64, a_record);

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Read(a_record)),
            b: Some(MemoryRecordEnum::Read(b_record)),
            c: None,
            memory: None,
            untrusted_instruction: None,
        };

        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);
        self.emit_branch_event(
            instruction,
            a,
            b,
            c,
            &mem_access_record,
            op_a_0,
            self.core.next_pc(),
        );
    }

    /// Executes LUI/AUIPC, which only write the destination register.
    fn execute_utype(&mut self, instruction: &Instruction) {
        let UTypeResult { mut a, b, c, rd, rw_record } = self.core.execute_utype(instruction);

        self.local_memory_access.insert_record(rd as u64, rw_record);

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Write(rw_record)),
            b: None,
            c: None,
            memory: None,
            untrusted_instruction: None,
        };

        let op_a_0 = instruction.op_a == 0;
        if op_a_0 {
            a = 0;
        }

        self.emit_events(self.core.clk(), self.core.next_pc(), instruction, &mem_access_record, 0);
        self.emit_utype_event(instruction, a, b, c, &mem_access_record, op_a_0);
    }

    /// Executes an ECALL, collecting a precompile's memory accesses into a separate
    /// map when the syscall is sent out (see `postprocess_precompile`).
    fn execute_ecall(&mut self, instruction: &Instruction) -> Result<(), ExecutionError> {
        let code = self.core.read_code();

        // Activate the precompile-local access map for syscalls that are dispatched
        // to a precompile and not retained in the core.
        if !self.core().is_retained_syscall(code) && code.should_send() == 1 {
            self.precompile_local_memory_access = Some(LocalMemoryAccess::default());
        }

        let EcallResult { a: _, a_record, b, b_record, c, c_record } =
            CoreVM::<'a>::execute_ecall(self, instruction, code)?;

        // ECALL touches x5 (syscall code) and the argument registers x10/x11.
        self.local_memory_access.insert_record(Register::X11 as u64, c_record);
        self.local_memory_access.insert_record(Register::X10 as u64, b_record);
        self.local_memory_access.insert_record(Register::X5 as u64, a_record);

        let mem_access_record = MemoryAccessRecord {
            a: Some(MemoryRecordEnum::Write(a_record)),
            b: Some(MemoryRecordEnum::Read(b_record)),
            c: Some(MemoryRecordEnum::Read(c_record)),
            memory: None,
            untrusted_instruction: None,
        };

        let op_a_0 = instruction.op_a == 0;
        self.emit_events(
            self.core.clk(),
            self.core.next_pc(),
            instruction,
            &mem_access_record,
            self.core.exit_code(),
        );

        self.emit_syscall_event(
            self.core.clk(),
            code,
            b,
            c,
            &mem_access_record,
            op_a_0,
            self.core.next_pc(),
            self.core.exit_code(),
            instruction,
        );

        Ok(())
    }
}
458
impl TracingVM<'_> {
    /// Book-keeping common to every instruction: updates record-level metadata and
    /// pushes "bump" events when timestamps or the pc cross limb boundaries.
    #[allow(clippy::too_many_arguments)]
    fn emit_events(
        &mut self,
        clk: u64,
        next_pc: u64,
        instruction: &Instruction,
        record: &MemoryAccessRecord,
        exit_code: u32,
    ) {
        // Remember the pc of the first traced instruction exactly once.
        self.record.pc_start.get_or_insert(self.core.pc());
        self.record.next_pc = next_pc;
        self.record.exit_code = exit_code;
        self.record.cpu_event_count += 1;

        let increment = self.core.next_clk() - clk;

        // bump1: the low 24 bits of the clock overflow during this instruction.
        let bump1 = clk % (1 << 24) + increment >= (1 << 24);
        // bump2: a sequential pc+4 step crosses a 16-bit pc-limb boundary for an
        // instruction whose chip does not itself constrain the next pc.
        let bump2 = !instruction.is_with_correct_next_pc()
            && next_pc == self.core.pc().wrapping_add(4)
            && (next_pc >> 16) != (self.core.pc() >> 16);

        if bump1 || bump2 {
            self.record.bump_state_events.push((clk, increment, bump2, next_pc));
        }

        // Emit a bump event for any operand access whose previous and current
        // timestamps fall in different 2^24 windows.
        if let Some(x) = record.a {
            if x.current_record().timestamp >> 24 != x.previous_record().timestamp >> 24 {
                self.record.bump_memory_events.push((x, instruction.op_a as u64, false));
            }
        }
        if let Some(x) = record.b {
            if x.current_record().timestamp >> 24 != x.previous_record().timestamp >> 24 {
                self.record.bump_memory_events.push((x, instruction.op_b, false));
            }
        }
        if let Some(x) = record.c {
            if x.current_record().timestamp >> 24 != x.previous_record().timestamp >> 24 {
                self.record.bump_memory_events.push((x, instruction.op_c, false));
            }
        }
    }

    /// Emits a memory-instruction event, routed into the event list for its
    /// opcode/width (loads into x0 get a dedicated list).
    #[inline]
    fn emit_mem_instr_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
    ) {
        let opcode = instruction.opcode;
        let event = MemInstrEvent {
            clk: self.core.clk(),
            pc: self.core.pc(),
            opcode,
            a,
            b,
            c,
            op_a_0,
            // SAFETY: both callers (`execute_load` and `execute_store`) always set
            // `record.memory` to `Some(..)`.
            mem_access: unsafe { record.memory.unwrap_unchecked() },
        };

        let record = ITypeRecord::new(record, instruction);
        if matches!(
            opcode,
            Opcode::LB
                | Opcode::LBU
                | Opcode::LH
                | Opcode::LHU
                | Opcode::LW
                | Opcode::LWU
                | Opcode::LD
        ) && op_a_0
        {
            self.record.memory_load_x0_events.push((event, record));
        } else if matches!(opcode, Opcode::LB | Opcode::LBU) {
            self.record.memory_load_byte_events.push((event, record));
        } else if matches!(opcode, Opcode::LH | Opcode::LHU) {
            self.record.memory_load_half_events.push((event, record));
        } else if matches!(opcode, Opcode::LW | Opcode::LWU) {
            self.record.memory_load_word_events.push((event, record));
        } else if opcode == Opcode::LD {
            self.record.memory_load_double_events.push((event, record));
        } else if opcode == Opcode::SB {
            self.record.memory_store_byte_events.push((event, record));
        } else if opcode == Opcode::SH {
            self.record.memory_store_half_events.push((event, record));
        } else if opcode == Opcode::SW {
            self.record.memory_store_word_events.push((event, record));
        } else if opcode == Opcode::SD {
            self.record.memory_store_double_events.push((event, record));
        }
    }

    /// Emits an ALU event into the per-opcode event list; writes to x0 go to a
    /// dedicated list regardless of opcode.
    fn emit_alu_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
    ) {
        let opcode = instruction.opcode;
        let event = AluEvent { clk: self.core.clk(), pc: self.core.pc(), opcode, a, b, c, op_a_0 };

        if op_a_0 {
            let record = ALUTypeRecord::new(record, instruction);
            self.record.alu_x0_events.push((event, record));
            return;
        }

        // Route by opcode; the record type chosen (RType/IType/ALUType) reflects the
        // operand encoding each chip expects.
        match opcode {
            Opcode::ADD => {
                let record = RTypeRecord::new(record, instruction);
                self.record.add_events.push((event, record));
            }
            Opcode::ADDW => {
                let record = ALUTypeRecord::new(record, instruction);
                self.record.addw_events.push((event, record));
            }
            Opcode::ADDI => {
                let record = ITypeRecord::new(record, instruction);
                self.record.addi_events.push((event, record));
            }
            Opcode::SUB => {
                let record = RTypeRecord::new(record, instruction);
                self.record.sub_events.push((event, record));
            }
            Opcode::SUBW => {
                let record = RTypeRecord::new(record, instruction);
                self.record.subw_events.push((event, record));
            }
            Opcode::XOR | Opcode::OR | Opcode::AND => {
                let record = ALUTypeRecord::new(record, instruction);
                self.record.bitwise_events.push((event, record));
            }
            Opcode::SLL | Opcode::SLLW => {
                let record = ALUTypeRecord::new(record, instruction);
                self.record.shift_left_events.push((event, record));
            }
            Opcode::SRL | Opcode::SRA | Opcode::SRLW | Opcode::SRAW => {
                let record = ALUTypeRecord::new(record, instruction);
                self.record.shift_right_events.push((event, record));
            }
            Opcode::SLT | Opcode::SLTU => {
                let record = ALUTypeRecord::new(record, instruction);
                self.record.lt_events.push((event, record));
            }
            Opcode::MUL | Opcode::MULHU | Opcode::MULHSU | Opcode::MULH | Opcode::MULW => {
                let record = RTypeRecord::new(record, instruction);
                self.record.mul_events.push((event, record));
            }
            Opcode::DIVU
            | Opcode::REMU
            | Opcode::DIV
            | Opcode::REM
            | Opcode::DIVW
            | Opcode::DIVUW
            | Opcode::REMUW
            | Opcode::REMW => {
                let record = RTypeRecord::new(record, instruction);
                self.record.divrem_events.push((event, record));
            }
            // `execute_instruction` only routes ALU opcodes here.
            _ => unreachable!(),
        }
    }

    /// Emits a JAL jump event (J-type operand record).
    #[inline]
    #[allow(clippy::too_many_arguments)]
    fn emit_jal_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
        next_pc: u64,
    ) {
        let event = JumpEvent {
            clk: self.core.clk(),
            pc: self.core.pc(),
            next_pc,
            opcode: instruction.opcode,
            a,
            b,
            c,
            op_a_0,
        };
        let record = JTypeRecord::new(record, instruction);
        self.record.jal_events.push((event, record));
    }

    /// Emits a JALR jump event (I-type operand record).
    #[inline]
    #[allow(clippy::too_many_arguments)]
    fn emit_jalr_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
        next_pc: u64,
    ) {
        let event = JumpEvent {
            clk: self.core.clk(),
            pc: self.core.pc(),
            next_pc,
            opcode: instruction.opcode,
            a,
            b,
            c,
            op_a_0,
        };
        let record = ITypeRecord::new(record, instruction);
        self.record.jalr_events.push((event, record));
    }

    /// Emits a branch event (I-type operand record).
    #[inline]
    #[allow(clippy::too_many_arguments)]
    fn emit_branch_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
        next_pc: u64,
    ) {
        let event = BranchEvent {
            clk: self.core.clk(),
            pc: self.core.pc(),
            next_pc,
            opcode: instruction.opcode,
            a,
            b,
            c,
            op_a_0,
        };
        let record = ITypeRecord::new(record, instruction);
        self.record.branch_events.push((event, record));
    }

    /// Emits a LUI/AUIPC event.
    #[inline]
    fn emit_utype_event(
        &mut self,
        instruction: &Instruction,
        a: u64,
        b: u64,
        c: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
    ) {
        let event = UTypeEvent {
            clk: self.core.clk(),
            pc: self.core.pc(),
            opcode: instruction.opcode,
            a,
            b,
            c,
            op_a_0,
        };
        // NOTE(review): U-type events reuse `JTypeRecord` — presumably the operand
        // layouts coincide; confirm against the record definitions.
        let record = JTypeRecord::new(record, instruction);
        self.record.utype_events.push((event, record));
    }

    /// Builds a syscall event (via `SyscallRuntime::syscall_event`) and pushes it with
    /// an R-type operand record.
    #[allow(clippy::too_many_arguments)]
    fn emit_syscall_event(
        &mut self,
        clk: u64,
        syscall_code: SyscallCode,
        arg1: u64,
        arg2: u64,
        record: &MemoryAccessRecord,
        op_a_0: bool,
        next_pc: u64,
        exit_code: u32,
        instruction: &Instruction,
    ) {
        let syscall_event =
            self.syscall_event(clk, syscall_code, arg1, arg2, op_a_0, next_pc, exit_code);

        let record = RTypeRecord::new(record, instruction);
        self.record.syscall_events.push((syscall_event, record));
    }
}
762
impl<'a> SyscallRuntime<'a> for TracingVM<'a> {
    // This runtime records events while executing.
    const TRACING: bool = true;

    fn core(&self) -> &CoreVM<'a> {
        &self.core
    }

    fn core_mut(&mut self) -> &mut CoreVM<'a> {
        &mut self.core
    }

    /// Builds a syscall event. `should_send` is true only for syscalls that are
    /// dispatched to a precompile (sendable and not retained in the core).
    #[inline]
    fn syscall_event(
        &self,
        clk: u64,
        syscall_code: SyscallCode,
        arg1: u64,
        arg2: u64,
        op_a_0: bool,
        next_pc: u64,
        exit_code: u32,
    ) -> SyscallEvent {
        let should_send =
            syscall_code.should_send() != 0 && !self.core.is_retained_syscall(syscall_code);

        SyscallEvent {
            pc: self.core.pc(),
            next_pc,
            clk,
            op_a_0,
            should_send,
            syscall_code,
            syscall_id: syscall_code.syscall_id(),
            arg1,
            arg2,
            exit_code,
        }
    }

    fn add_precompile_event(
        &mut self,
        syscall_code: SyscallCode,
        syscall_event: SyscallEvent,
        event: PrecompileEvent,
    ) {
        self.record.precompile_events.add_event(syscall_code, syscall_event, event);
    }

    fn record_mut(&mut self) -> &mut ExecutionRecord {
        self.record
    }

    /// Reads a register, recording the access in the precompile-local map when a
    /// precompile is active, or in the CPU-local map otherwise.
    fn rr(&mut self, register: usize) -> MemoryReadRecord {
        let record = SyscallRuntime::rr(self.core_mut(), register);

        if let Some(local_memory_access) = &mut self.precompile_local_memory_access {
            local_memory_access.insert_record(register as u64, record);
        } else {
            self.local_memory_access.insert_record(register as u64, record);
        }

        record
    }

    /// Reads one memory word at `addr`, recording the access locally (precompile map
    /// if active, CPU map otherwise).
    fn mr(&mut self, addr: u64) -> MemoryReadRecord {
        let record = SyscallRuntime::mr(self.core_mut(), addr);

        if let Some(local_memory_access) = &mut self.precompile_local_memory_access {
            local_memory_access.insert_record(addr, record);
        } else {
            self.local_memory_access.insert_record(addr, record);
        }

        record
    }

    /// Reads `len` consecutive 8-byte words starting at `addr`; the access for word
    /// `i` is recorded at `addr + 8 * i`.
    fn mr_slice(&mut self, addr: u64, len: usize) -> Vec<MemoryReadRecord> {
        let records = SyscallRuntime::mr_slice(self.core_mut(), addr, len);

        for (i, record) in records.iter().enumerate() {
            if let Some(local_memory_access) = &mut self.precompile_local_memory_access {
                local_memory_access.insert_record(addr + i as u64 * 8, *record);
            } else {
                self.local_memory_access.insert_record(addr + i as u64 * 8, *record);
            }
        }

        records
    }

    /// Writes one memory word at `addr`, recording the access locally.
    fn mw(&mut self, addr: u64) -> MemoryWriteRecord {
        let record = SyscallRuntime::mw(self.core_mut(), addr);

        if let Some(local_memory_access) = &mut self.precompile_local_memory_access {
            local_memory_access.insert_record(addr, record);
        } else {
            self.local_memory_access.insert_record(addr, record);
        }

        record
    }

    /// Writes `len` consecutive 8-byte words starting at `addr`, recording each access.
    fn mw_slice(&mut self, addr: u64, len: usize) -> Vec<MemoryWriteRecord> {
        let records = SyscallRuntime::mw_slice(self.core_mut(), addr, len);

        for (i, record) in records.iter().enumerate() {
            if let Some(local_memory_access) = &mut self.precompile_local_memory_access {
                local_memory_access.insert_record(addr + i as u64 * 8, *record);
            } else {
                self.local_memory_access.insert_record(addr + i as u64 * 8, *record);
            }
        }

        records
    }

    /// Drains the precompile-local accesses collected during a syscall.
    ///
    /// For every address the precompile touched, any CPU-local event for that address
    /// is finalized into the record (the precompile now owns the local event for it),
    /// and the precompile's events are returned to the caller.
    fn postprocess_precompile(&mut self) -> Vec<MemoryLocalEvent> {
        let mut precompile_local_memory_access = Vec::new();

        // Take the map out, deactivating precompile collection.
        if let Some(mut local_memory_access) =
            std::mem::take(&mut self.precompile_local_memory_access)
        {
            for (addr, event) in local_memory_access.drain() {
                if let Some(cpu_mem_access) = self.local_memory_access.remove(&addr) {
                    self.record.cpu_local_memory_access.push(cpu_mem_access);
                }

                precompile_local_memory_access.push(event);
            }
        }

        precompile_local_memory_access
    }
}
899
/// Accumulates, per address, the earliest initial and latest final memory access
/// observed while executing (see `insert_record`).
#[derive(Debug, Default)]
pub struct LocalMemoryAccess {
    /// Map from address to the merged local memory event for that address.
    pub inner: HashMap<u64, MemoryLocalEvent>,
}
904
905impl LocalMemoryAccess {
906 #[inline]
907 #[allow(clippy::needless_pass_by_value)]
908 pub(crate) fn insert_record(&mut self, addr: u64, event: impl IntoMemoryRecord) {
909 self.inner
910 .entry(addr)
911 .and_modify(|e| {
912 let current_record = event.current_record();
913 let previous_record = event.previous_record();
914
915 if current_record.timestamp > e.final_mem_access.timestamp {
917 e.final_mem_access = current_record;
918 }
919
920 if previous_record.timestamp < e.initial_mem_access.timestamp {
922 e.initial_mem_access = previous_record;
923 }
924 })
925 .or_insert_with(|| MemoryLocalEvent {
926 addr,
927 initial_mem_access: event.previous_record(),
928 final_mem_access: event.current_record(),
929 });
930 }
931}
932
933impl Deref for LocalMemoryAccess {
934 type Target = HashMap<u64, MemoryLocalEvent>;
935 fn deref(&self) -> &Self::Target {
936 &self.inner
937 }
938}
939
940impl DerefMut for LocalMemoryAccess {
941 fn deref_mut(&mut self) -> &mut Self::Target {
942 &mut self.inner
943 }
944}