1#[cfg(feature = "profiling")]
2use std::{fs::File, io::BufWriter};
3use std::{str::FromStr, sync::Arc};
4
5use crate::estimator::RecordEstimator;
6#[cfg(feature = "profiling")]
7use crate::profiler::Profiler;
8
9use clap::ValueEnum;
10use enum_map::EnumMap;
11use hashbrown::HashMap;
12use serde::{Deserialize, Serialize};
13use sp1_primitives::consts::BABYBEAR_PRIME;
14use sp1_stark::{air::PublicValues, SP1CoreOpts};
15use strum::IntoEnumIterator;
16use thiserror::Error;
17
18use crate::{
19 context::{IoOptions, SP1Context},
20 dependencies::{
21 emit_auipc_dependency, emit_branch_dependencies, emit_divrem_dependencies,
22 emit_jump_dependencies, emit_memory_dependencies,
23 },
24 estimate_riscv_lde_size,
25 events::{
26 AUIPCEvent, AluEvent, BranchEvent, CpuEvent, JumpEvent, MemInstrEvent,
27 MemoryAccessPosition, MemoryInitializeFinalizeEvent, MemoryLocalEvent, MemoryReadRecord,
28 MemoryRecord, MemoryRecordEnum, MemoryWriteRecord, SyscallEvent,
29 NUM_LOCAL_MEMORY_ENTRIES_PER_ROW_EXEC,
30 },
31 hook::{HookEnv, HookRegistry},
32 memory::{Entry, Memory},
33 pad_rv32im_event_counts,
34 record::{ExecutionRecord, MemoryAccessRecord},
35 report::ExecutionReport,
36 state::{ExecutionState, ForkState},
37 subproof::SubproofVerifier,
38 syscalls::{default_syscall_map, Syscall, SyscallCode, SyscallContext},
39 CoreAirId, Instruction, MaximalShapes, Opcode, Program, Register, RiscvAirId,
40};
41
/// The default program counter increment: RV32 instructions are 4 bytes wide.
pub const DEFAULT_PC_INC: u32 = 4;
/// Sentinel value marking an unused/irrelevant program counter.
/// It is odd (misaligned), so it can never collide with a real instruction address.
pub const UNUSED_PC: u32 = 1;

/// Maximum allowed program size (2^22).
/// NOTE(review): presumably measured in instructions/words — confirm against the loader.
pub const MAX_PROGRAM_SIZE: usize = 1 << 22;
51
/// Whether deferred proofs should be verified during execution.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DeferredProofVerification {
    /// Verify deferred proofs while executing.
    Enabled,
    /// Skip verification of deferred proofs (e.g. when re-executing from a checkpoint).
    Disabled,
}
60
61impl From<bool> for DeferredProofVerification {
62 fn from(value: bool) -> Self {
63 if value {
64 DeferredProofVerification::Enabled
65 } else {
66 DeferredProofVerification::Disabled
67 }
68 }
69}
70
/// A RISC-V executor that runs a [`Program`] and accumulates execution records for proving.
///
/// NOTE(review): the field descriptions below are inferred from how the fields are used in
/// this file; confirm the hedged ones against the wider crate.
pub struct Executor<'a> {
    /// The program being executed.
    pub program: Arc<Program>,

    /// The mutable machine state: pc, clk, memory, registers, current shard, etc.
    pub state: ExecutionState,

    /// First-touch snapshot of memory, populated in checkpoint or unconstrained mode so
    /// a shard can be re-executed from the checkpoint (`None` = untouched before).
    pub memory_checkpoint: Memory<Option<MemoryRecord>>,

    /// For each lazily-initialized address, whether it already held a nonzero value when
    /// first touched (see `or_insert_with(|| *value != 0)` in the memory accessors).
    pub uninitialized_memory_checkpoint: Memory<bool>,

    /// Aggregate execution report (e.g. per-opcode counts when `print_report` is set).
    pub report: ExecutionReport,

    /// The mode the executor runs in (simple, checkpoint, trace, shape collection).
    pub executor_mode: ExecutorMode,

    /// The a/b/c/memory access records captured for the instruction currently executing
    /// (only maintained in trace mode).
    pub memory_accesses: MemoryAccessRecord,

    /// Whether execution is currently inside an unconstrained (unproven) region.
    pub unconstrained: bool,

    /// Whether to tally per-opcode counts into `report`.
    pub print_report: bool,

    /// Optional estimator fed with local-memory/touched-address statistics by `mr`/`mw`.
    pub record_estimator: Option<Box<RecordEstimator>>,

    /// Whether to emit global memory initialize/finalize events.
    pub emit_global_memory_events: bool,

    /// Shard size; set to `opts.shard_size * 4` in `with_context` — presumably clk units
    /// per cycle, TODO confirm.
    pub shard_size: u32,

    /// Number of shards per checkpoint batch.
    pub shard_batch_size: u32,

    /// The maximum number of extra cycles any registered syscall may consume.
    pub max_syscall_cycles: u32,

    /// Mapping from syscall code to its implementation.
    pub syscall_map: HashMap<SyscallCode, Arc<dyn Syscall>>,

    /// Core options controlling sharding and execution.
    pub opts: SP1CoreOpts,

    /// Optional global cycle limit; exceeding it aborts execution.
    pub max_cycles: Option<u64>,

    /// The record currently being filled with events.
    pub record: Box<ExecutionRecord>,

    /// Completed records waiting to be handed off.
    pub records: Vec<Box<ExecutionRecord>>,

    /// Local memory events for the current shard, keyed by address.
    pub local_memory_access: HashMap<u32, MemoryLocalEvent>,

    /// Cycle-tracker bookkeeping, keyed by label.
    pub cycle_tracker: HashMap<String, (u64, u32)>,

    /// Buffered I/O text, keyed by file descriptor.
    pub io_buf: HashMap<u32, String>,

    /// The profiler and its output writer, when `TRACE_FILE` is set (profiling builds only).
    #[cfg(feature = "profiling")]
    pub profiler: Option<(Profiler, BufWriter<File>)>,

    /// Saved state used to roll back memory when leaving an unconstrained region.
    pub unconstrained_state: Box<ForkState>,

    /// Running per-shard counts (opcodes, syscalls, local memory accesses).
    pub local_counts: LocalCounts,

    /// Verifier used for deferred sub-proofs, if any.
    pub subproof_verifier: Option<&'a dyn SubproofVerifier>,

    /// Registered hooks invokable by file descriptor.
    pub hook_registry: HookRegistry<'a>,

    /// Optional maximal shapes used for shape checking.
    pub maximal_shapes: Option<MaximalShapes>,

    /// Per-AIR cost table loaded from `rv32im_costs.json`.
    pub costs: HashMap<RiscvAirId, u64>,

    /// Whether deferred proofs are verified during execution.
    pub deferred_proof_verification: DeferredProofVerification,

    /// How often (in cycles) shape checks are performed — defaults to 16.
    pub shape_check_frequency: u64,

    /// Whether to check the estimated LDE size against `lde_size_threshold`.
    pub lde_size_check: bool,

    /// Threshold for the LDE size check.
    pub lde_size_threshold: u64,

    /// I/O options from the [`SP1Context`].
    pub io_options: IoOptions<'a>,

    /// Estimated per-AIR event counts (internal bookkeeping).
    event_counts: EnumMap<RiscvAirId, u64>,
}
193
/// The mode the [`Executor`] runs in.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize, ValueEnum)]
pub enum ExecutorMode {
    /// Execute without tracing or checkpointing (the default).
    #[default]
    Simple,
    /// Execute while snapshotting first-touch memory so shards can be re-executed.
    Checkpoint,
    /// Execute while emitting the full event trace needed for proving.
    Trace,
    /// Execute while collecting shape information.
    ShapeCollection,
}
207
/// Running counts accumulated during execution.
/// NOTE(review): appears to be per-shard bookkeeping (reset elsewhere) — confirm.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct LocalCounts {
    /// Number of events observed per opcode.
    pub event_counts: Box<EnumMap<Opcode, u64>>,
    /// Number of syscalls sent.
    pub syscalls_sent: usize,
    /// Number of local memory accesses (incremented in `mr`/`mw` on shard change or
    /// precompile access).
    pub local_mem: usize,
}
218
/// Errors that the [`Executor`] can produce during execution.
#[derive(Error, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum ExecutionError {
    /// The program halted with a non-zero exit code.
    #[error("execution failed with exit code {0}")]
    HaltWithNonZeroExitCode(u32),

    /// A load/store was invalid for the given opcode and address.
    #[error("invalid memory access for opcode {0} and address {1}")]
    InvalidMemoryAccess(Opcode, u32),

    /// The program invoked a syscall id with no registered implementation.
    #[error("unimplemented syscall {0}")]
    UnsupportedSyscall(u32),

    /// A breakpoint was hit.
    #[error("breakpoint encountered")]
    Breakpoint(),

    /// Execution exceeded the configured cycle limit.
    #[error("exceeded cycle limit of {0}")]
    ExceededCycleLimit(u64),

    /// A syscall was used where it is not permitted (unconstrained mode).
    #[error("syscall called in unconstrained mode")]
    InvalidSyscallUsage(u64),

    /// An UNIMP opcode was executed.
    #[error("got unimplemented as opcode")]
    Unimplemented(),

    /// The program terminated while still in an unconstrained region.
    #[error("program ended in unconstrained mode")]
    EndInUnconstrained(),

    /// The unconstrained region ran longer than its own cycle limit.
    #[error("unconstrained cycle limit exceeded")]
    UnconstrainedCycleLimitExceeded(u64),
}
258
259impl<'a> Executor<'a> {
    /// Create a new [`Executor`] from a program and options, using a default [`SP1Context`].
    #[must_use]
    pub fn new(program: Program, opts: SP1CoreOpts) -> Self {
        Self::with_context(program, opts, SP1Context::default())
    }
265
    /// Install a [`Profiler`] writing to the path in `TRACE_FILE`, if that variable is set
    /// and the `profiling` feature is enabled; otherwise a no-op (hence the
    /// `#[allow(unused_variables)]` for `elf_bytes`).
    ///
    /// `TRACE_SAMPLE_RATE` configures the sample rate; a missing or unparsable value
    /// silently falls back to 1.
    #[inline]
    #[allow(unused_variables)]
    pub fn maybe_setup_profiler(&mut self, elf_bytes: &[u8]) {
        #[cfg(feature = "profiling")]
        {
            let trace_buf = std::env::var("TRACE_FILE").ok().map(|file| {
                let file = File::create(file).unwrap();
                BufWriter::new(file)
            });

            if let Some(trace_buf) = trace_buf {
                eprintln!("Profiling enabled");

                // Note: the rate is echoed before parsing, so an invalid value is still
                // printed even though it falls back to 1.
                let sample_rate = std::env::var("TRACE_SAMPLE_RATE")
                    .ok()
                    .and_then(|rate| {
                        eprintln!("Profiling sample rate: {rate}");
                        rate.parse::<u32>().ok()
                    })
                    .unwrap_or(1);

                self.profiler = Some((
                    Profiler::new(elf_bytes, sample_rate as u64)
                        .expect("Failed to create profiler"),
                    trace_buf,
                ));
            }
        }
    }
305
306 #[must_use]
308 pub fn with_context(program: Program, opts: SP1CoreOpts, context: SP1Context<'a>) -> Self {
309 let program = Arc::new(program);
311
312 let record = ExecutionRecord::new(program.clone());
314
315 let syscall_map = default_syscall_map();
317 let max_syscall_cycles =
318 syscall_map.values().map(|syscall| syscall.num_extra_cycles()).max().unwrap_or(0);
319
320 let hook_registry = context.hook_registry.unwrap_or_default();
321
322 let costs: HashMap<String, usize> =
323 serde_json::from_str(include_str!("./artifacts/rv32im_costs.json")).unwrap();
324 let costs: HashMap<RiscvAirId, usize> =
325 costs.into_iter().map(|(k, v)| (RiscvAirId::from_str(&k).unwrap(), v)).collect();
326
327 Self {
328 record: Box::new(record),
329 records: vec![],
330 state: ExecutionState::new(program.pc_start),
331 program,
332 memory_accesses: MemoryAccessRecord::default(),
333 shard_size: (opts.shard_size as u32) * 4,
334 shard_batch_size: opts.shard_batch_size as u32,
335 cycle_tracker: HashMap::new(),
336 io_buf: HashMap::new(),
337 #[cfg(feature = "profiling")]
338 profiler: None,
339 unconstrained: false,
340 unconstrained_state: Box::new(ForkState::default()),
341 syscall_map,
342 executor_mode: ExecutorMode::Trace,
343 emit_global_memory_events: true,
344 max_syscall_cycles,
345 report: ExecutionReport::default(),
346 local_counts: LocalCounts::default(),
347 print_report: false,
348 record_estimator: None,
349 subproof_verifier: context.subproof_verifier,
350 hook_registry,
351 opts,
352 max_cycles: context.max_cycles,
353 deferred_proof_verification: context.deferred_proof_verification.into(),
354 memory_checkpoint: Memory::default(),
355 uninitialized_memory_checkpoint: Memory::default(),
356 local_memory_access: HashMap::new(),
357 maximal_shapes: None,
358 costs: costs.into_iter().map(|(k, v)| (k, v as u64)).collect(),
359 shape_check_frequency: 16,
360 lde_size_check: false,
361 lde_size_threshold: 0,
362 event_counts: EnumMap::default(),
363 io_options: context.io_options,
364 }
365 }
366
367 pub fn hook(&self, fd: u32, buf: &[u8]) -> eyre::Result<Vec<Vec<u8>>> {
374 Ok(self
375 .hook_registry
376 .get(fd)
377 .ok_or(eyre::eyre!("no hook found for file descriptor {}", fd))?
378 .invoke_hook(self.hook_env(), buf))
379 }
380
    /// Create a [`HookEnv`] giving hooks read-only access to this runtime.
    #[must_use]
    pub fn hook_env<'b>(&'b self) -> HookEnv<'b, 'a> {
        HookEnv { runtime: self }
    }
386
    /// Rebuild an executor from a previously saved [`ExecutionState`] checkpoint.
    #[must_use]
    pub fn recover(program: Program, state: ExecutionState, opts: SP1CoreOpts) -> Self {
        let mut runtime = Self::new(program, opts);
        runtime.state = state;
        // Deferred proof verification is disabled on recovery — presumably it was already
        // performed during the original pass that produced the checkpoint; confirm.
        runtime.deferred_proof_verification = DeferredProofVerification::Disabled;
        runtime
    }
397
    /// Read all 32 registers, returning 0 for any register never written.
    ///
    /// In checkpoint or unconstrained mode, the first touch of each register is
    /// snapshotted into `memory_checkpoint` (`None` = untouched) so the shard can be
    /// re-executed from the checkpoint.
    #[allow(clippy::single_match_else)]
    #[must_use]
    pub fn registers(&mut self) -> [u32; 32] {
        let mut registers = [0; 32];
        for i in 0..32 {
            let record = self.state.memory.registers.get(i);

            if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
                // `or_insert*` keeps only the FIRST observed value per register.
                match record {
                    Some(record) => {
                        self.memory_checkpoint.registers.entry(i).or_insert_with(|| Some(*record));
                    }
                    None => {
                        self.memory_checkpoint.registers.entry(i).or_insert(None);
                    }
                }
            }

            registers[i as usize] = match record {
                Some(record) => record.value,
                None => 0,
            };
        }
        registers
    }
427
    /// Read a single register's current value without bumping its shard/timestamp,
    /// returning 0 if it was never written.
    ///
    /// Like [`Self::registers`], snapshots the first touch into `memory_checkpoint`
    /// when in checkpoint or unconstrained mode.
    #[must_use]
    pub fn register(&mut self, register: Register) -> u32 {
        let addr = register as u32;
        let record = self.state.memory.registers.get(addr);

        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match record {
                Some(record) => {
                    self.memory_checkpoint.registers.entry(addr).or_insert_with(|| Some(*record));
                }
                None => {
                    self.memory_checkpoint.registers.entry(addr).or_insert(None);
                }
            }
        }
        match record {
            Some(record) => record.value,
            None => 0,
        }
    }
449
    /// Peek the word at `addr` without recording a memory access event,
    /// returning 0 if the address was never written.
    ///
    /// Snapshots the first touch into `memory_checkpoint` when in checkpoint or
    /// unconstrained mode.
    #[must_use]
    pub fn word(&mut self, addr: u32) -> u32 {
        #[allow(clippy::single_match_else)]
        let record = self.state.memory.page_table.get(addr);

        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match record {
                Some(record) => {
                    self.memory_checkpoint.page_table.entry(addr).or_insert_with(|| Some(*record));
                }
                None => {
                    self.memory_checkpoint.page_table.entry(addr).or_insert(None);
                }
            }
        }

        match record {
            Some(record) => record.value,
            None => 0,
        }
    }
474
475 #[must_use]
479 pub fn byte(&mut self, addr: u32) -> u8 {
480 let word = self.word(addr - addr % 4);
481 (word >> ((addr % 4) * 8)) as u8
482 }
483
484 #[must_use]
486 pub const fn timestamp(&self, position: &MemoryAccessPosition) -> u32 {
487 self.state.clk + *position as u32
488 }
489
    /// The index of the shard currently being executed.
    #[must_use]
    #[inline]
    pub fn shard(&self) -> u32 {
        self.state.current_shard
    }
496
    /// Read the word at `addr`, stamping it with `shard`/`timestamp`, and return a
    /// [`MemoryReadRecord`] carrying both the new and previous stamps.
    ///
    /// `local_memory_access` is `Some` for precompile accesses that keep their own local
    /// event map; `None` routes local events to `self.local_memory_access`.
    ///
    /// # Panics
    ///
    /// Panics if `addr` is unaligned, falls in the register range, or is >= the field
    /// modulus.
    pub fn mr(
        &mut self,
        addr: u32,
        shard: u32,
        timestamp: u32,
        local_memory_access: Option<&mut HashMap<u32, MemoryLocalEvent>>,
    ) -> MemoryReadRecord {
        if !addr.is_multiple_of(4) || addr <= Register::X31 as u32 || addr >= BABYBEAR_PRIME {
            panic!("Invalid memory access: addr={addr}");
        }

        let entry = self.state.memory.page_table.entry(addr);
        // Checkpoint/unconstrained: snapshot the FIRST observed state of this address.
        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.page_table.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.page_table.entry(addr).or_insert(None);
                }
            }
        }

        // Unconstrained: remember the pre-access state so it can be rolled back when the
        // unconstrained region ends.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }

        // Lazily materialize the record from uninitialized memory (default 0), also
        // noting in the checkpoint whether the initial value was nonzero.
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.page_table.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .page_table
                    .entry(addr)
                    .or_insert_with(|| *value != 0);
                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };

        // A first access in this shard (or any precompile access) counts as local memory.
        if !self.unconstrained && (record.shard != shard || local_memory_access.is_some()) {
            self.local_counts.local_mem += 1;
        }

        if !self.unconstrained {
            if let Some(estimator) = &mut self.record_estimator {
                if record.shard != shard {
                    estimator.current_local_mem += 1;
                }
                // Precompile accesses feed a separate touched-address set.
                let current_touched_compressed_addresses = if local_memory_access.is_some() {
                    &mut estimator.current_precompile_touched_compressed_addresses
                } else {
                    &mut estimator.current_touched_compressed_addresses
                };
                current_touched_compressed_addresses.insert(addr >> 2);
            }
        }

        let prev_record = *record;
        record.shard = shard;
        record.timestamp = timestamp;

        // Trace mode: record/extend the local memory event for this address.
        if !self.unconstrained && self.executor_mode == ExecutorMode::Trace {
            let local_memory_access = if let Some(local_memory_access) = local_memory_access {
                local_memory_access
            } else {
                &mut self.local_memory_access
            };

            local_memory_access
                .entry(addr)
                .and_modify(|e| {
                    e.final_mem_access = *record;
                })
                .or_insert(MemoryLocalEvent {
                    addr,
                    initial_mem_access: prev_record,
                    final_mem_access: *record,
                });
        }

        MemoryReadRecord::new(
            record.value,
            record.shard,
            record.timestamp,
            prev_record.shard,
            prev_record.timestamp,
        )
    }
605
    /// Read a register, stamping it with `shard`/`timestamp`, and return its value.
    ///
    /// Untraced counterpart of [`Self::rr_traced`]: performs the same checkpoint,
    /// rollback, and lazy-initialization bookkeeping but emits no local memory events.
    pub fn rr(&mut self, register: Register, shard: u32, timestamp: u32) -> u32 {
        let addr = register as u32;
        let entry = self.state.memory.registers.entry(addr);
        // Checkpoint/unconstrained: snapshot the FIRST observed state of this register.
        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.registers.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.registers.entry(addr).or_insert(None);
                }
            }
        }

        // Unconstrained: remember the pre-access state for rollback.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }

        // Lazily materialize the register from uninitialized memory (default 0).
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.registers.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .registers
                    .entry(addr)
                    .or_insert_with(|| *value != 0);
                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };

        record.shard = shard;
        record.timestamp = timestamp;
        record.value
    }
653
    /// Read a register, stamping it with `shard`/`timestamp`, and return a
    /// [`MemoryReadRecord`] carrying both the new and previous stamps.
    ///
    /// Traced counterpart of [`Self::rr`]: additionally records local memory events in
    /// trace mode. `local_memory_access` is `Some` for precompile contexts with their
    /// own event map.
    pub fn rr_traced(
        &mut self,
        register: Register,
        shard: u32,
        timestamp: u32,
        local_memory_access: Option<&mut HashMap<u32, MemoryLocalEvent>>,
    ) -> MemoryReadRecord {
        let addr = register as u32;
        let entry = self.state.memory.registers.entry(addr);
        // Checkpoint/unconstrained: snapshot the FIRST observed state of this register.
        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.registers.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.registers.entry(addr).or_insert(None);
                }
            }
        }
        // Unconstrained: remember the pre-access state for rollback.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }
        // Lazily materialize the register from uninitialized memory (default 0).
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.registers.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .registers
                    .entry(addr)
                    .or_insert_with(|| *value != 0);
                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };
        let prev_record = *record;
        record.shard = shard;
        record.timestamp = timestamp;
        // Trace mode: record/extend the local memory event for this register.
        if !self.unconstrained && self.executor_mode == ExecutorMode::Trace {
            let local_memory_access = if let Some(local_memory_access) = local_memory_access {
                local_memory_access
            } else {
                &mut self.local_memory_access
            };
            local_memory_access
                .entry(addr)
                .and_modify(|e| {
                    e.final_mem_access = *record;
                })
                .or_insert(MemoryLocalEvent {
                    addr,
                    initial_mem_access: prev_record,
                    final_mem_access: *record,
                });
        }
        MemoryReadRecord::new(
            record.value,
            record.shard,
            record.timestamp,
            prev_record.shard,
            prev_record.timestamp,
        )
    }
    /// Write `value` to the word at `addr`, stamping it with `shard`/`timestamp`, and
    /// return a [`MemoryWriteRecord`] carrying both the new and previous state.
    ///
    /// Mirrors [`Self::mr`] (same checkpoint, rollback, lazy-init, local-memory, and
    /// estimator bookkeeping), additionally updating the stored value.
    ///
    /// # Panics
    ///
    /// Panics if `addr` is unaligned, falls in the register range, or is >= the field
    /// modulus.
    pub fn mw(
        &mut self,
        addr: u32,
        value: u32,
        shard: u32,
        timestamp: u32,
        local_memory_access: Option<&mut HashMap<u32, MemoryLocalEvent>>,
    ) -> MemoryWriteRecord {
        if !addr.is_multiple_of(4) || addr <= Register::X31 as u32 || addr >= BABYBEAR_PRIME {
            panic!("Invalid memory access: addr={addr}");
        }

        let entry = self.state.memory.page_table.entry(addr);
        // Checkpoint/unconstrained: snapshot the FIRST observed state of this address.
        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.page_table.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.page_table.entry(addr).or_insert(None);
                }
            }
        }
        // Unconstrained: remember the pre-write state for rollback.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }
        // Lazily materialize the record from uninitialized memory (default 0).
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.page_table.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .page_table
                    .entry(addr)
                    .or_insert_with(|| *value != 0);

                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };

        // A first access in this shard (or any precompile access) counts as local memory.
        if !self.unconstrained && (record.shard != shard || local_memory_access.is_some()) {
            self.local_counts.local_mem += 1;
        }

        if !self.unconstrained {
            if let Some(estimator) = &mut self.record_estimator {
                if record.shard != shard {
                    estimator.current_local_mem += 1;
                }
                // Precompile accesses feed a separate touched-address set.
                let current_touched_compressed_addresses = if local_memory_access.is_some() {
                    &mut estimator.current_precompile_touched_compressed_addresses
                } else {
                    &mut estimator.current_touched_compressed_addresses
                };
                current_touched_compressed_addresses.insert(addr >> 2);
            }
        }

        let prev_record = *record;
        record.value = value;
        record.shard = shard;
        record.timestamp = timestamp;
        // Trace mode: record/extend the local memory event for this address.
        if !self.unconstrained && self.executor_mode == ExecutorMode::Trace {
            let local_memory_access = if let Some(local_memory_access) = local_memory_access {
                local_memory_access
            } else {
                &mut self.local_memory_access
            };

            local_memory_access
                .entry(addr)
                .and_modify(|e| {
                    e.final_mem_access = *record;
                })
                .or_insert(MemoryLocalEvent {
                    addr,
                    initial_mem_access: prev_record,
                    final_mem_access: *record,
                });
        }

        MemoryWriteRecord::new(
            record.value,
            record.shard,
            record.timestamp,
            prev_record.value,
            prev_record.shard,
            prev_record.timestamp,
        )
    }
838
839 pub fn rw_traced(
843 &mut self,
844 register: Register,
845 value: u32,
846 shard: u32,
847 timestamp: u32,
848 local_memory_access: Option<&mut HashMap<u32, MemoryLocalEvent>>,
849 ) -> MemoryWriteRecord {
850 let addr = register as u32;
851
852 let entry = self.state.memory.registers.entry(addr);
854 if self.unconstrained {
855 match entry {
856 Entry::Occupied(ref entry) => {
857 let record = entry.get();
858 self.memory_checkpoint.registers.entry(addr).or_insert_with(|| Some(*record));
859 }
860 Entry::Vacant(_) => {
861 self.memory_checkpoint.registers.entry(addr).or_insert(None);
862 }
863 }
864 }
865
866 if self.unconstrained {
869 let record = match entry {
870 Entry::Occupied(ref entry) => Some(entry.get()),
871 Entry::Vacant(_) => None,
872 };
873 self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
874 }
875
876 let record: &mut MemoryRecord = match entry {
878 Entry::Occupied(entry) => entry.into_mut(),
879 Entry::Vacant(entry) => {
880 let value = self.state.uninitialized_memory.registers.get(addr).unwrap_or(&0);
882 self.uninitialized_memory_checkpoint
883 .registers
884 .entry(addr)
885 .or_insert_with(|| *value != 0);
886
887 entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
888 }
889 };
890
891 let prev_record = *record;
892 record.value = value;
893 record.shard = shard;
894 record.timestamp = timestamp;
895
896 if !self.unconstrained {
897 let local_memory_access = if let Some(local_memory_access) = local_memory_access {
898 local_memory_access
899 } else {
900 &mut self.local_memory_access
901 };
902
903 local_memory_access
904 .entry(addr)
905 .and_modify(|e| {
906 e.final_mem_access = *record;
907 })
908 .or_insert(MemoryLocalEvent {
909 addr,
910 initial_mem_access: prev_record,
911 final_mem_access: *record,
912 });
913 }
914
915 MemoryWriteRecord::new(
917 record.value,
918 record.shard,
919 record.timestamp,
920 prev_record.value,
921 prev_record.shard,
922 prev_record.timestamp,
923 )
924 }
925
    /// Write `value` to a register, stamping it with `shard`/`timestamp`.
    ///
    /// Untraced counterpart of [`Self::rw_traced`]: same checkpoint, rollback, and
    /// lazy-initialization bookkeeping, but emits no local memory events and returns
    /// nothing.
    #[inline]
    pub fn rw(&mut self, register: Register, value: u32, shard: u32, timestamp: u32) {
        let addr = register as u32;
        let entry = self.state.memory.registers.entry(addr);
        // Checkpoint/unconstrained: snapshot the FIRST observed state of this register.
        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.registers.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.registers.entry(addr).or_insert(None);
                }
            }
        }

        // Unconstrained: remember the pre-write state for rollback.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }

        // Lazily materialize the register from uninitialized memory (default 0).
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.registers.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .registers
                    .entry(addr)
                    .or_insert_with(|| *value != 0);

                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };

        record.value = value;
        record.shard = shard;
        record.timestamp = timestamp;
    }
975
    /// Read memory at `addr` for the current CPU cycle (Memory access position),
    /// capturing the access record in trace mode, and return the value.
    #[inline]
    pub fn mr_cpu(&mut self, addr: u32) -> u32 {
        let record =
            self.mr(addr, self.shard(), self.timestamp(&MemoryAccessPosition::Memory), None);
        if self.executor_mode == ExecutorMode::Trace {
            self.memory_accesses.memory = Some(record.into());
        }
        record.value
    }
988
    /// Read a register for the current CPU cycle at the given access position,
    /// stashing the access record into the matching `memory_accesses` slot in trace mode.
    #[inline]
    pub fn rr_cpu(&mut self, register: Register, position: MemoryAccessPosition) -> u32 {
        if self.executor_mode == ExecutorMode::Trace {
            let record = self.rr_traced(register, self.shard(), self.timestamp(&position), None);
            // Unconstrained reads are not part of the proven trace.
            if !self.unconstrained {
                match position {
                    MemoryAccessPosition::A => self.memory_accesses.a = Some(record.into()),
                    MemoryAccessPosition::B => self.memory_accesses.b = Some(record.into()),
                    MemoryAccessPosition::C => self.memory_accesses.c = Some(record.into()),
                    MemoryAccessPosition::Memory => {
                        self.memory_accesses.memory = Some(record.into());
                    }
                }
            }
            record.value
        } else {
            // Fast path: no event capture outside trace mode.
            self.rr(register, self.shard(), self.timestamp(&position))
        }
    }
1010
    /// Write memory at `addr` for the current CPU cycle (Memory access position),
    /// capturing the access record in trace mode.
    pub fn mw_cpu(&mut self, addr: u32, value: u32) {
        let record =
            self.mw(addr, value, self.shard(), self.timestamp(&MemoryAccessPosition::Memory), None);
        if self.executor_mode == ExecutorMode::Trace {
            // Each instruction performs at most one memory access.
            debug_assert!(self.memory_accesses.memory.is_none());
            self.memory_accesses.memory = Some(record.into());
        }
    }
1027
    /// Write a register for the current CPU cycle; destination writes always use access
    /// position A.
    pub fn rw_cpu(&mut self, register: Register, value: u32) {
        let position = MemoryAccessPosition::A;

        // x0 is hardwired to zero: any write to it stores 0.
        let value = if register == Register::X0 { 0 } else { value };

        if self.executor_mode == ExecutorMode::Trace {
            let record =
                self.rw_traced(register, value, self.shard(), self.timestamp(&position), None);
            if !self.unconstrained {
                // Each instruction writes its `a` operand at most once.
                debug_assert!(self.memory_accesses.a.is_none());
                self.memory_accesses.a = Some(record.into());
            }
        } else {
            // Fast path: no event capture outside trace mode.
            self.rw(register, value, self.shard(), self.timestamp(&position));
        }
    }
1050
    /// Emit the CPU event plus the instruction-class-specific event(s) for a completed
    /// instruction, dispatching on the instruction kind.
    ///
    /// `record` holds the access records captured while executing the instruction;
    /// `op_a_0` presumably flags that the `a` operand is register x0 — confirm.
    #[allow(clippy::too_many_arguments)]
    fn emit_events(
        &mut self,
        clk: u32,
        next_pc: u32,
        instruction: &Instruction,
        syscall_code: SyscallCode,
        a: u32,
        b: u32,
        c: u32,
        op_a_0: bool,
        record: MemoryAccessRecord,
        exit_code: u32,
    ) {
        self.emit_cpu(clk, next_pc, a, b, c, record, exit_code);

        if instruction.is_alu_instruction() {
            self.emit_alu_event(instruction.opcode, a, b, c, op_a_0);
        } else if instruction.is_memory_load_instruction() ||
            instruction.is_memory_store_instruction()
        {
            self.emit_mem_instr_event(instruction.opcode, a, b, c, op_a_0);
        } else if instruction.is_branch_instruction() {
            self.emit_branch_event(instruction.opcode, a, b, c, op_a_0, next_pc);
        } else if instruction.is_jump_instruction() {
            self.emit_jump_event(instruction.opcode, a, b, c, op_a_0, next_pc);
        } else if instruction.is_auipc_instruction() {
            self.emit_auipc_event(instruction.opcode, a, b, c, op_a_0);
        } else if instruction.is_ecall_instruction() {
            self.emit_syscall_event(clk, record.a, op_a_0, syscall_code, b, c, next_pc);
        } else {
            // Every instruction kind must fall into one of the classes above.
            unreachable!()
        }
    }
1086
    /// Push a [`CpuEvent`] for the instruction at the current pc.
    #[allow(clippy::too_many_arguments)]
    #[inline]
    fn emit_cpu(
        &mut self,
        clk: u32,
        next_pc: u32,
        a: u32,
        b: u32,
        c: u32,
        record: MemoryAccessRecord,
        exit_code: u32,
    ) {
        self.record.cpu_events.push(CpuEvent {
            clk,
            pc: self.state.pc,
            next_pc,
            a,
            a_record: record.a,
            b,
            b_record: record.b,
            c,
            c_record: record.c,
            exit_code,
        });
    }
1113
    /// Push an [`AluEvent`] into the table matching the opcode class; div/rem opcodes
    /// additionally emit their dependency events.
    fn emit_alu_event(&mut self, opcode: Opcode, a: u32, b: u32, c: u32, op_a_0: bool) {
        let event = AluEvent { pc: self.state.pc, opcode, a, b, c, op_a_0 };
        match opcode {
            Opcode::ADD => {
                self.record.add_events.push(event);
            }
            Opcode::SUB => {
                self.record.sub_events.push(event);
            }
            Opcode::XOR | Opcode::OR | Opcode::AND => {
                self.record.bitwise_events.push(event);
            }
            Opcode::SLL => {
                self.record.shift_left_events.push(event);
            }
            Opcode::SRL | Opcode::SRA => {
                self.record.shift_right_events.push(event);
            }
            Opcode::SLT | Opcode::SLTU => {
                self.record.lt_events.push(event);
            }
            Opcode::MUL | Opcode::MULHU | Opcode::MULHSU | Opcode::MULH => {
                self.record.mul_events.push(event);
            }
            Opcode::DIVU | Opcode::REMU | Opcode::DIV | Opcode::REM => {
                self.record.divrem_events.push(event);
                // Div/rem is verified via auxiliary mul/add/lt events.
                emit_divrem_dependencies(self, event);
            }
            // Only ALU opcodes reach this method (see `emit_events`).
            _ => unreachable!(),
        }
    }
1146
1147 #[inline]
1149 fn emit_mem_instr_event(&mut self, opcode: Opcode, a: u32, b: u32, c: u32, op_a_0: bool) {
1150 let event = MemInstrEvent {
1151 shard: self.shard(),
1152 clk: self.state.clk,
1153 pc: self.state.pc,
1154 opcode,
1155 a,
1156 b,
1157 c,
1158 op_a_0,
1159 mem_access: self.memory_accesses.memory.expect("Must have memory access"),
1160 };
1161
1162 self.record.memory_instr_events.push(event);
1163 emit_memory_dependencies(
1164 self,
1165 event,
1166 self.memory_accesses.memory.expect("Must have memory access").current_record(),
1167 );
1168 }
1169
    /// Push a [`BranchEvent`] and emit its dependency events.
    #[inline]
    fn emit_branch_event(
        &mut self,
        opcode: Opcode,
        a: u32,
        b: u32,
        c: u32,
        op_a_0: bool,
        next_pc: u32,
    ) {
        let event = BranchEvent { pc: self.state.pc, next_pc, opcode, a, b, c, op_a_0 };
        self.record.branch_events.push(event);
        emit_branch_dependencies(self, event);
    }
1185
    /// Push a [`JumpEvent`] and emit its dependency events.
    #[inline]
    fn emit_jump_event(
        &mut self,
        opcode: Opcode,
        a: u32,
        b: u32,
        c: u32,
        op_a_0: bool,
        next_pc: u32,
    ) {
        let event = JumpEvent::new(self.state.pc, next_pc, opcode, a, b, c, op_a_0);
        self.record.jump_events.push(event);
        emit_jump_dependencies(self, event);
    }
1201
    /// Push an [`AUIPCEvent`] and emit its dependency event.
    #[inline]
    fn emit_auipc_event(&mut self, opcode: Opcode, a: u32, b: u32, c: u32, op_a_0: bool) {
        let event = AUIPCEvent::new(self.state.pc, opcode, a, b, c, op_a_0);
        self.record.auipc_events.push(event);
        emit_auipc_dependency(self, event);
    }
1209
    /// Build a [`SyscallEvent`] for the current pc/shard.
    ///
    /// If `a_record` is not a write record, a default write record is used and
    /// `a_record_is_real` is set to `false`; a missing `op_a_0` defaults to `false`.
    #[allow(clippy::too_many_arguments)]
    #[inline]
    pub(crate) fn syscall_event(
        &self,
        clk: u32,
        a_record: Option<MemoryRecordEnum>,
        op_a_0: Option<bool>,
        syscall_code: SyscallCode,
        arg1: u32,
        arg2: u32,
        next_pc: u32,
    ) -> SyscallEvent {
        // Only a real write to operand `a` is marked as such; anything else becomes a
        // placeholder record.
        let (write, is_real) = match a_record {
            Some(MemoryRecordEnum::Write(record)) => (record, true),
            _ => (MemoryWriteRecord::default(), false),
        };

        let op_a_0 = op_a_0.unwrap_or(false);

        SyscallEvent {
            shard: self.shard(),
            clk,
            pc: self.state.pc,
            next_pc,
            a_record: write,
            a_record_is_real: is_real,
            op_a_0,
            syscall_code,
            syscall_id: syscall_code.syscall_id(),
            arg1,
            arg2,
        }
    }
1249
    /// Build a [`SyscallEvent`] via [`Self::syscall_event`] and push it onto the record.
    #[allow(clippy::too_many_arguments)]
    fn emit_syscall_event(
        &mut self,
        clk: u32,
        a_record: Option<MemoryRecordEnum>,
        op_a_0: bool,
        syscall_code: SyscallCode,
        arg1: u32,
        arg2: u32,
        next_pc: u32,
    ) {
        let syscall_event =
            self.syscall_event(clk, a_record, Some(op_a_0), syscall_code, arg1, arg2, next_pc);

        self.record.syscall_events.push(syscall_event);
    }
1267
    /// Fetch the destination register and the `b`/`c` operands for an ALU instruction,
    /// resolving immediates according to the instruction's `imm_b`/`imm_c` flags.
    ///
    /// Note: for R-type, `c` is read before `b` (positions C then B).
    fn alu_rr(&mut self, instruction: &Instruction) -> (Register, u32, u32) {
        if !instruction.imm_c {
            // R-type: both operands come from registers.
            let (rd, rs1, rs2) = instruction.r_type();
            let c = self.rr_cpu(rs2, MemoryAccessPosition::C);
            let b = self.rr_cpu(rs1, MemoryAccessPosition::B);
            (rd, b, c)
        } else if !instruction.imm_b && instruction.imm_c {
            // I-type: register `b`, immediate `c`.
            let (rd, rs1, imm) = instruction.i_type();
            let (rd, b, c) = (rd, self.rr_cpu(rs1, MemoryAccessPosition::B), imm);
            (rd, b, c)
        } else {
            // Both operands are immediates.
            debug_assert!(instruction.imm_b && instruction.imm_c);
            let (rd, b, c) =
                (Register::from_u8(instruction.op_a), instruction.op_b, instruction.op_c);
            (rd, b, c)
        }
    }
1286
    /// Write the result of an ALU instruction to its destination register.
    #[inline]
    fn alu_rw(&mut self, rd: Register, a: u32) {
        self.rw_cpu(rd, a);
    }
1292
1293 fn load_rr(&mut self, instruction: &Instruction) -> (Register, u32, u32, u32, u32) {
1295 let (rd, rs1, imm) = instruction.i_type();
1296 let (b, c) = (self.rr_cpu(rs1, MemoryAccessPosition::B), imm);
1297 let addr = b.wrapping_add(c);
1298 let memory_value = self.mr_cpu(align(addr));
1299 (rd, b, c, addr, memory_value)
1300 }
1301
1302 fn store_rr(&mut self, instruction: &Instruction) -> (u32, u32, u32, u32, u32) {
1304 let (rs1, rs2, imm) = instruction.s_type();
1305 let c = imm;
1306 let b = self.rr_cpu(rs2, MemoryAccessPosition::B);
1307 let a = self.rr_cpu(rs1, MemoryAccessPosition::A);
1308 let addr = b.wrapping_add(c);
1309 let memory_value = self.word(align(addr));
1310 (a, b, c, addr, memory_value)
1311 }
1312
1313 fn branch_rr(&mut self, instruction: &Instruction) -> (u32, u32, u32) {
1315 let (rs1, rs2, imm) = instruction.b_type();
1316 let c = imm;
1317 let b = self.rr_cpu(rs2, MemoryAccessPosition::B);
1318 let a = self.rr_cpu(rs1, MemoryAccessPosition::A);
1319 (a, b, c)
1320 }
1321
    /// Fetch the instruction at the current program counter.
    #[inline]
    fn fetch(&self) -> Instruction {
        *self.program.fetch(self.state.pc)
    }
1327
    /// Executes one decoded instruction: updates event-count estimates,
    /// dispatches on the instruction class, applies the architectural state
    /// change (registers, memory, pc, clk), and — in trace mode — emits the
    /// corresponding events.
    #[allow(clippy::too_many_lines)]
    fn execute_instruction(&mut self, instruction: &Instruction) -> Result<(), ExecutionError> {
        // `clk` and `next_pc` may be overridden by an ecall below (syscalls
        // can redirect control flow and consume extra cycles).
        let mut clk = self.state.clk;
        let mut exit_code = 0u32;
        let mut next_pc = self.state.pc.wrapping_add(4);
        let (mut a, b, c): (u32, u32, u32);

        // Start a fresh access record for this instruction when tracing.
        if self.executor_mode == ExecutorMode::Trace {
            self.memory_accesses = MemoryAccessRecord::default();
        }

        let mut syscall = SyscallCode::default();

        if !self.unconstrained {
            if self.print_report {
                self.report.opcode_counts[instruction.opcode] += 1;
            }
            // Track per-shard event counts; loads/jumps/branches/divrem also
            // generate auxiliary ALU events in their dependency chips, so
            // those are over-counted here accordingly.
            self.local_counts.event_counts[instruction.opcode] += 1;
            if instruction.is_memory_load_instruction() {
                self.local_counts.event_counts[Opcode::ADD] += 2;
            } else if instruction.is_jump_instruction() {
                self.local_counts.event_counts[Opcode::ADD] += 1;
            } else if instruction.is_branch_instruction() {
                self.local_counts.event_counts[Opcode::ADD] += 1;
                self.local_counts.event_counts[Opcode::SLTU] += 2;
            } else if instruction.is_divrem_instruction() {
                self.local_counts.event_counts[Opcode::MUL] += 2;
                self.local_counts.event_counts[Opcode::ADD] += 2;
                self.local_counts.event_counts[Opcode::SLTU] += 1;
            }
        }

        // Dispatch on instruction class. Each helper returns the (a, b, c)
        // operand triple recorded in the trace.
        if instruction.is_alu_instruction() {
            (a, b, c) = self.execute_alu(instruction);
        } else if instruction.is_memory_load_instruction() {
            (a, b, c) = self.execute_load(instruction)?;
        } else if instruction.is_memory_store_instruction() {
            (a, b, c) = self.execute_store(instruction)?;
        } else if instruction.is_branch_instruction() {
            (a, b, c, next_pc) = self.execute_branch(instruction, next_pc);
        } else if instruction.is_jump_instruction() {
            (a, b, c, next_pc) = self.execute_jump(instruction);
        } else if instruction.is_auipc_instruction() {
            // AUIPC: rd = pc + imm (both operands recorded as the immediate).
            let (rd, imm) = instruction.u_type();
            (b, c) = (imm, imm);
            a = self.state.pc.wrapping_add(b);
            self.rw_cpu(rd, a);
        } else if instruction.is_ecall_instruction() {
            (a, b, c, clk, next_pc, syscall, exit_code) = self.execute_ecall()?;
        } else if instruction.is_ebreak_instruction() {
            return Err(ExecutionError::Breakpoint());
        } else if instruction.is_unimp_instruction() {
            return Err(ExecutionError::Unimplemented());
        } else {
            eprintln!("unreachable: {:?}", instruction.opcode);
            unreachable!()
        }

        // Writes to x0 are discarded: the register always reads as zero.
        let op_a_0 = instruction.op_a == Register::X0 as u8;
        if op_a_0 {
            a = 0;
        }

        if self.executor_mode == ExecutorMode::Trace {
            self.emit_events(
                clk,
                next_pc,
                instruction,
                syscall,
                a,
                b,
                c,
                op_a_0,
                self.memory_accesses,
                exit_code,
            );
        }

        self.state.pc = next_pc;

        // Each instruction advances the shard-local clock by 4 (one slot per
        // memory-access position).
        self.state.clk += 4;

        Ok(())
    }
1423
    /// Executes an ALU instruction: resolves operands via `alu_rr`, computes
    /// the RV32IM result, writes it to `rd`, and returns `(a, b, c)`.
    ///
    /// The arithmetic follows RV32IM semantics: shift amounts are masked to 5
    /// bits by `wrapping_shl`/`wrapping_shr`, division by zero yields an
    /// all-ones quotient / the dividend as remainder, and signed overflow
    /// (i32::MIN / -1) wraps.
    fn execute_alu(&mut self, instruction: &Instruction) -> (u32, u32, u32) {
        let (rd, b, c) = self.alu_rr(instruction);
        let a = match instruction.opcode {
            Opcode::ADD => b.wrapping_add(c),
            Opcode::SUB => b.wrapping_sub(c),
            Opcode::XOR => b ^ c,
            Opcode::OR => b | c,
            Opcode::AND => b & c,
            Opcode::SLL => b.wrapping_shl(c),
            Opcode::SRL => b.wrapping_shr(c),
            // Arithmetic shift: sign-extend by shifting as i32.
            Opcode::SRA => (b as i32).wrapping_shr(c) as u32,
            Opcode::SLT => {
                if (b as i32) < (c as i32) {
                    1
                } else {
                    0
                }
            }
            Opcode::SLTU => {
                if b < c {
                    1
                } else {
                    0
                }
            }
            Opcode::MUL => b.wrapping_mul(c),
            // High 32 bits of the 64-bit product, with the RISC-V signedness
            // conventions: signed*signed, unsigned*unsigned, signed*unsigned.
            // Note `c as i64` zero-extends a u32, as MULHSU requires.
            Opcode::MULH => (((b as i32) as i64).wrapping_mul((c as i32) as i64) >> 32) as u32,
            Opcode::MULHU => ((b as u64).wrapping_mul(c as u64) >> 32) as u32,
            Opcode::MULHSU => (((b as i32) as i64).wrapping_mul(c as i64) >> 32) as u32,
            Opcode::DIV => {
                if c == 0 {
                    u32::MAX
                } else {
                    (b as i32).wrapping_div(c as i32) as u32
                }
            }
            Opcode::DIVU => {
                if c == 0 {
                    u32::MAX
                } else {
                    b.wrapping_div(c)
                }
            }
            Opcode::REM => {
                if c == 0 {
                    b
                } else {
                    (b as i32).wrapping_rem(c as i32) as u32
                }
            }
            Opcode::REMU => {
                if c == 0 {
                    b
                } else {
                    b.wrapping_rem(c)
                }
            }
            _ => unreachable!(),
        };
        self.alu_rw(rd, a);
        (a, b, c)
    }
1487
1488 fn execute_load(
1490 &mut self,
1491 instruction: &Instruction,
1492 ) -> Result<(u32, u32, u32), ExecutionError> {
1493 let (rd, b, c, addr, memory_read_value) = self.load_rr(instruction);
1494
1495 let a = match instruction.opcode {
1496 Opcode::LB => ((memory_read_value >> ((addr % 4) * 8)) & 0xFF) as i8 as i32 as u32,
1497 Opcode::LH => {
1498 if addr % 2 != 0 {
1499 return Err(ExecutionError::InvalidMemoryAccess(Opcode::LH, addr));
1500 }
1501 ((memory_read_value >> (((addr / 2) % 2) * 16)) & 0xFFFF) as i16 as i32 as u32
1502 }
1503 Opcode::LW => {
1504 if addr % 4 != 0 {
1505 return Err(ExecutionError::InvalidMemoryAccess(Opcode::LW, addr));
1506 }
1507 memory_read_value
1508 }
1509 Opcode::LBU => (memory_read_value >> ((addr % 4) * 8)) & 0xFF,
1510 Opcode::LHU => {
1511 if addr % 2 != 0 {
1512 return Err(ExecutionError::InvalidMemoryAccess(Opcode::LHU, addr));
1513 }
1514 (memory_read_value >> (((addr / 2) % 2) * 16)) & 0xFFFF
1515 }
1516 _ => unreachable!(),
1517 };
1518 self.rw_cpu(rd, a);
1519 Ok((a, b, c))
1520 }
1521
1522 fn execute_store(
1524 &mut self,
1525 instruction: &Instruction,
1526 ) -> Result<(u32, u32, u32), ExecutionError> {
1527 let (a, b, c, addr, memory_read_value) = self.store_rr(instruction);
1528
1529 let memory_store_value = match instruction.opcode {
1530 Opcode::SB => {
1531 let shift = (addr % 4) * 8;
1532 ((a & 0xFF) << shift) | (memory_read_value & !(0xFF << shift))
1533 }
1534 Opcode::SH => {
1535 if addr % 2 != 0 {
1536 return Err(ExecutionError::InvalidMemoryAccess(Opcode::SH, addr));
1537 }
1538 let shift = ((addr / 2) % 2) * 16;
1539 ((a & 0xFFFF) << shift) | (memory_read_value & !(0xFFFF << shift))
1540 }
1541 Opcode::SW => {
1542 if addr % 4 != 0 {
1543 return Err(ExecutionError::InvalidMemoryAccess(Opcode::SW, addr));
1544 }
1545 a
1546 }
1547 _ => unreachable!(),
1548 };
1549 self.mw_cpu(align(addr), memory_store_value);
1550 Ok((a, b, c))
1551 }
1552
1553 fn execute_branch(
1555 &mut self,
1556 instruction: &Instruction,
1557 mut next_pc: u32,
1558 ) -> (u32, u32, u32, u32) {
1559 let (a, b, c) = self.branch_rr(instruction);
1560 let branch = match instruction.opcode {
1561 Opcode::BEQ => a == b,
1562 Opcode::BNE => a != b,
1563 Opcode::BLT => (a as i32) < (b as i32),
1564 Opcode::BGE => (a as i32) >= (b as i32),
1565 Opcode::BLTU => a < b,
1566 Opcode::BGEU => a >= b,
1567 _ => {
1568 unreachable!()
1569 }
1570 };
1571 if branch {
1572 next_pc = self.state.pc.wrapping_add(c);
1573 }
1574 (a, b, c, next_pc)
1575 }
1576
    /// Executes an ECALL instruction.
    ///
    /// The syscall id is read from t0 (x5) and the two arguments from
    /// x10/x11; the handler's return value (or the syscall id, if the handler
    /// returns `None`) is written back to t0. Returns
    /// `(a, b, c, clk, next_pc, syscall, exit_code)` for event emission.
    ///
    /// # Errors
    /// - `InvalidSyscallUsage` for any syscall other than
    ///   EXIT_UNCONSTRAINED/WRITE inside an unconstrained block.
    /// - `HaltWithNonZeroExitCode` when HALT sets a nonzero exit code.
    /// - `UnsupportedSyscall` when no handler is registered for the id.
    #[allow(clippy::type_complexity)]
    fn execute_ecall(
        &mut self,
    ) -> Result<(u32, u32, u32, u32, u32, SyscallCode, u32), ExecutionError> {
        let t0 = Register::X5;
        let syscall_id = self.register(t0);
        // Read args in C-then-B order to match the trace's access positions.
        let c = self.rr_cpu(Register::X11, MemoryAccessPosition::C);
        let b = self.rr_cpu(Register::X10, MemoryAccessPosition::B);
        let syscall = SyscallCode::from_u32(syscall_id);

        if self.print_report && !self.unconstrained {
            self.report.syscall_counts[syscall] += 1;
        }

        // Unconstrained blocks may only exit or write; anything else would
        // leak unproven effects into constrained state.
        if self.unconstrained &&
            (syscall != SyscallCode::EXIT_UNCONSTRAINED && syscall != SyscallCode::WRITE)
        {
            return Err(ExecutionError::InvalidSyscallUsage(syscall_id as u64));
        }

        // Count under the canonical (deduplicated) syscall code.
        let syscall_for_count = syscall.count_map();
        let syscall_count = self.state.syscall_counts.entry(syscall_for_count).or_insert(0);
        *syscall_count += 1;

        let syscall_impl = self.get_syscall(syscall).cloned();
        let mut precompile_rt = SyscallContext::new(self);
        let (a, precompile_next_pc, precompile_cycles, returned_exit_code) =
            if let Some(syscall_impl) = syscall_impl {
                // Handlers returning `None` leave t0 (the syscall id) intact.
                let res = syscall_impl.execute(&mut precompile_rt, syscall, b, c);
                let a = if let Some(val) = res { val } else { syscall_id };

                if syscall == SyscallCode::HALT && precompile_rt.exit_code != 0 {
                    return Err(ExecutionError::HaltWithNonZeroExitCode(precompile_rt.exit_code));
                }

                (a, precompile_rt.next_pc, syscall_impl.num_extra_cycles(), precompile_rt.exit_code)
            } else {
                return Err(ExecutionError::UnsupportedSyscall(syscall_id));
            };

        // Record-estimator bookkeeping: bucket precompile events into shards
        // capped by the per-precompile split threshold.
        if let (Some(estimator), Some(syscall_id)) =
            (&mut self.record_estimator, syscall.as_air_id())
        {
            let threshold = match syscall_id {
                RiscvAirId::ShaExtend => self.opts.split_opts.sha_extend,
                RiscvAirId::ShaCompress => self.opts.split_opts.sha_compress,
                RiscvAirId::KeccakPermute => self.opts.split_opts.keccak,
                _ => self.opts.split_opts.deferred,
            } as u64;
            let shards = &mut estimator.precompile_records[syscall_id];
            let local_memory_ct =
                estimator.current_precompile_touched_compressed_addresses.len() as u64;
            // Append to the last shard while it has room, else open a new one.
            match shards.last_mut().filter(|shard| shard.0 < threshold) {
                Some((shard_precompile_event_ct, shard_local_memory_ct)) => {
                    *shard_precompile_event_ct += 1;
                    *shard_local_memory_ct += local_memory_ct;
                }
                None => shards.push((1, local_memory_ct)),
            }
            estimator.current_precompile_touched_compressed_addresses.clear();
        }

        // EXIT_UNCONSTRAINED restores state; re-read the argument registers
        // so the emitted event reflects the post-restore values.
        let (b, c) = if syscall == SyscallCode::EXIT_UNCONSTRAINED {
            (self.register(Register::X10), self.register(Register::X11))
        } else {
            (b, c)
        };

        self.rw_cpu(t0, a);
        // Syscalls may consume extra cycles beyond the normal per-instruction
        // increment applied by the caller.
        let clk = self.state.clk;
        self.state.clk += precompile_cycles;

        Ok((a, b, c, clk, precompile_next_pc, syscall, returned_exit_code))
    }
1669
1670 fn execute_jump(&mut self, instruction: &Instruction) -> (u32, u32, u32, u32) {
1672 let (a, b, c, next_pc) = match instruction.opcode {
1673 Opcode::JAL => {
1674 let (rd, imm) = instruction.j_type();
1675 let (b, c) = (imm, 0);
1676 let a = self.state.pc + 4;
1677 self.rw_cpu(rd, a);
1678 let next_pc = self.state.pc.wrapping_add(imm);
1679 (a, b, c, next_pc)
1680 }
1681 Opcode::JALR => {
1682 let (rd, rs1, imm) = instruction.i_type();
1683 let (b, c) = (self.rr_cpu(rs1, MemoryAccessPosition::B), imm);
1684 let a = self.state.pc + 4;
1685 self.rw_cpu(rd, a);
1686 let next_pc = b.wrapping_add(c);
1687 (a, b, c, next_pc)
1688 }
1689 _ => unreachable!(),
1690 };
1691 (a, b, c, next_pc)
1692 }
1693
    /// Executes a single cycle: fetch, log, execute, then decide whether the
    /// current shard must end (clk budget exhausted, LDE size too large, or
    /// no maximal shape fits the estimated event counts).
    ///
    /// Returns `Ok(true)` when the program has finished (pc reached 0 or ran
    /// past the end of the instruction list).
    #[inline]
    #[allow(clippy::too_many_lines)]
    fn execute_cycle(&mut self) -> Result<bool, ExecutionError> {
        let instruction = self.fetch();

        self.log(&instruction);

        self.execute_instruction(&instruction)?;

        self.state.global_clk += 1;

        if self.unconstrained {
            self.unconstrained_state.total_unconstrained_cycles += 1;
        }

        if !self.unconstrained {
            // Leave headroom for the worst-case syscall so a shard never
            // exceeds `shard_size` mid-syscall.
            let cpu_exit = self.max_syscall_cycles + self.state.clk >= self.shard_size;

            let mut shape_match_found = true;
            // Shape/size checks are amortized: only every
            // `shape_check_frequency` cycles.
            if self.state.global_clk.is_multiple_of(self.shape_check_frequency) {
                // clk advances by 4 per instruction, so clk >> 2 = CPU rows.
                Self::estimate_riscv_event_counts(
                    &mut self.event_counts,
                    (self.state.clk >> 2) as u64,
                    &self.local_counts,
                );

                if self.lde_size_check {
                    let padded_event_counts =
                        pad_rv32im_event_counts(self.event_counts, self.shape_check_frequency);
                    let padded_lde_size = estimate_riscv_lde_size(padded_event_counts, &self.costs);
                    if padded_lde_size > self.lde_size_threshold {
                        #[allow(clippy::cast_precision_loss)]
                        // NOTE(review): the divisor (1 << 9) looks inconsistent
                        // with the "GiB" label (bytes -> GiB would be 1 << 30);
                        // confirm the unit `estimate_riscv_lde_size` returns.
                        let size_gib = (padded_lde_size as f64) / (1 << 9) as f64;
                        tracing::warn!(
                            "Stopping shard early since the estimated LDE size is too large: {:.3} GiB",
                            size_gib
                        );
                        shape_match_found = false;
                    }
                }
                else if let Some(maximal_shapes) = &self.maximal_shapes {
                    // Slack between a shape's capacity and the current count;
                    // untouched tables (count == 0) never constrain the shard.
                    let distance = |threshold: usize, count: usize| {
                        if count != 0 {
                            threshold - count
                        } else {
                            usize::MAX
                        }
                    };

                    shape_match_found = false;

                    for shape in maximal_shapes.iter() {
                        // Skip shapes whose CPU table is already too small.
                        let cpu_threshold = shape[CoreAirId::Cpu];
                        if self.state.clk > ((1 << cpu_threshold) << 2) {
                            continue;
                        }

                        // l_infinity = minimum slack across all non-CPU tables.
                        let mut l_infinity = usize::MAX;
                        let mut shape_too_small = false;
                        for air in CoreAirId::iter() {
                            if air == CoreAirId::Cpu {
                                continue;
                            }

                            let threshold = 1 << shape[air];
                            let count = self.event_counts[RiscvAirId::from(air)] as usize;
                            if count > threshold {
                                shape_too_small = true;
                                break;
                            }

                            if distance(threshold, count) < l_infinity {
                                l_infinity = distance(threshold, count);
                            }
                        }

                        if shape_too_small {
                            continue;
                        }

                        // Require enough slack to survive until the next
                        // shape check (counts are estimates, hence the 32x
                        // safety factor).
                        if l_infinity >= 32 * (self.shape_check_frequency as usize) {
                            shape_match_found = true;
                            break;
                        }
                    }

                    if !shape_match_found {
                        self.record.counts = Some(self.event_counts);
                        tracing::debug!(
                            "Stopping shard {} to stay within some maximal shape. clk = {} pc = 0x{:x?}",
                            self.shard(),
                            self.state.global_clk,
                            self.state.pc,
                        );
                    }
                }
            }

            // End the shard: flush the current record and reset the clock.
            if cpu_exit || !shape_match_found {
                self.bump_record();
                self.state.current_shard += 1;
                self.state.clk = 0;
            }

            if let Some(max_cycles) = self.max_cycles {
                if self.state.global_clk > max_cycles {
                    return Err(ExecutionError::ExceededCycleLimit(max_cycles));
                }
            }
        }

        // The program halts via pc == 0 (HALT) or by running off the end.
        let done = self.state.pc == 0 ||
            self.state.pc.wrapping_sub(self.program.pc_base) >=
                (self.program.instructions.len() * 4) as u32;
        if done && self.unconstrained {
            tracing::error!("program ended in unconstrained mode at clk {}", self.state.global_clk);
            return Err(ExecutionError::EndInUnconstrained());
        }
        Ok(done)
    }
1827
    /// Finalizes the current shard: snapshots event-count estimates, drains
    /// local memory events into the record (trace mode), pushes the record
    /// onto `self.records`, and starts a fresh one that carries over the
    /// public values.
    pub fn bump_record(&mut self) {
        if let Some(estimator) = &mut self.record_estimator {
            self.local_counts.local_mem = std::mem::take(&mut estimator.current_local_mem);
            // clk advances by 4 per instruction, so clk >> 2 = CPU rows.
            Self::estimate_riscv_event_counts(
                &mut self.event_counts,
                (self.state.clk >> 2) as u64,
                &self.local_counts,
            );
            estimator.core_records.push(self.event_counts);
            estimator.current_touched_compressed_addresses.clear();
        }
        // Per-shard counters start over for the next shard.
        self.local_counts = LocalCounts::default();
        if self.executor_mode == ExecutorMode::Trace {
            for (_, event) in self.local_memory_access.drain() {
                self.record.cpu_local_memory_access.push(event);
            }
        }

        // Swap in a fresh record, preserving public values across shards.
        let removed_record = std::mem::replace(
            &mut self.record,
            Box::new(ExecutionRecord::new(self.program.clone())),
        );
        let public_values = removed_record.public_values;
        self.record.public_values = public_values;
        self.records.push(removed_record);
    }
1857
1858 pub fn execute_record(
1865 &mut self,
1866 emit_global_memory_events: bool,
1867 ) -> Result<(Vec<Box<ExecutionRecord>>, bool), ExecutionError> {
1868 self.executor_mode = ExecutorMode::Trace;
1869 self.emit_global_memory_events = emit_global_memory_events;
1870 self.print_report = true;
1871 let done = self.execute()?;
1872 Ok((std::mem::take(&mut self.records), done))
1873 }
1874
    /// Executes up to a batch of shards in `Checkpoint` mode and returns a
    /// checkpoint of the *pre-execution* state, the resulting public values,
    /// and whether the program finished.
    ///
    /// The checkpoint is reconstructed from the memory-checkpoint diffs
    /// recorded during execution rather than by cloning all of memory.
    pub fn execute_state(
        &mut self,
        emit_global_memory_events: bool,
    ) -> Result<(ExecutionState, PublicValues<u32, u32>, bool), ExecutionError> {
        self.memory_checkpoint.clear();
        self.executor_mode = ExecutorMode::Checkpoint;
        self.emit_global_memory_events = emit_global_memory_events;

        // Clone the cheap parts of the state only: temporarily move out the
        // large maps so `state.clone()` does not copy them.
        let memory = std::mem::take(&mut self.state.memory);
        let uninitialized_memory = std::mem::take(&mut self.state.uninitialized_memory);
        let proof_stream = std::mem::take(&mut self.state.proof_stream);
        let mut checkpoint = tracing::debug_span!("clone").in_scope(|| self.state.clone());
        self.state.memory = memory;
        self.state.uninitialized_memory = uninitialized_memory;
        self.state.proof_stream = proof_stream;

        let done = tracing::debug_span!("execute").in_scope(|| self.execute())?;
        let next_pc = self.state.pc;
        tracing::debug_span!("create memory checkpoint").in_scope(|| {
            let replacement_memory_checkpoint = Memory::<_>::new_preallocated();
            let replacement_uninitialized_memory_checkpoint = Memory::<_>::new_preallocated();
            let memory_checkpoint =
                std::mem::replace(&mut self.memory_checkpoint, replacement_memory_checkpoint);
            let uninitialized_memory_checkpoint = std::mem::replace(
                &mut self.uninitialized_memory_checkpoint,
                replacement_uninitialized_memory_checkpoint,
            );
            if done && !self.emit_global_memory_events {
                // Last checkpoint and global memory events are skipped: the
                // checkpoint must contain the *final* memory so the caller
                // can still derive them. Start from the final state and undo
                // the recorded first-touch diffs.
                checkpoint.memory.clone_from(&self.state.memory);
                memory_checkpoint.into_iter().for_each(|(addr, record)| {
                    if let Some(record) = record {
                        checkpoint.memory.insert(addr, record);
                    } else {
                        checkpoint.memory.remove(addr);
                    }
                });
                checkpoint.uninitialized_memory = self.state.uninitialized_memory.clone();
                // Drop entries that were first initialized during this batch.
                for (addr, is_old) in uninitialized_memory_checkpoint {
                    if !is_old {
                        checkpoint.uninitialized_memory.remove(addr);
                    }
                }
            } else {
                // Normal case: the checkpoint only needs the values that were
                // touched during this batch (their pre-execution snapshots).
                checkpoint.memory = memory_checkpoint
                    .into_iter()
                    .filter_map(|(addr, record)| record.map(|record| (addr, record)))
                    .collect();
                checkpoint.uninitialized_memory = uninitialized_memory_checkpoint
                    .into_iter()
                    .filter(|&(_, has_value)| has_value)
                    .map(|(addr, _)| (addr, *self.state.uninitialized_memory.get(addr).unwrap()))
                    .collect();
            }
        });
        // Public values for this batch start and resume at the pc we stopped at.
        let mut public_values = self.records.last().as_ref().unwrap().public_values;
        public_values.start_pc = next_pc;
        public_values.next_pc = next_pc;
        if !done {
            self.records.clear();
        }
        Ok((checkpoint, public_values, done))
    }
1950
1951 fn initialize(&mut self) {
1952 self.state.clk = 0;
1953
1954 tracing::debug!("loading memory image");
1955 for (&addr, value) in &self.program.memory_image {
1956 self.state.memory.insert(addr, MemoryRecord { value: *value, shard: 0, timestamp: 0 });
1957 }
1958 self.state.memory.insert(0, MemoryRecord { value: 0, shard: 0, timestamp: 0 });
1960 }
1961
1962 pub fn run_fast(&mut self) -> Result<(), ExecutionError> {
1968 self.executor_mode = ExecutorMode::Simple;
1969 self.print_report = true;
1970 while !self.execute()? {}
1971
1972 #[cfg(feature = "profiling")]
1973 if let Some((profiler, writer)) = self.profiler.take() {
1974 profiler.write(writer).expect("Failed to write profile to output file");
1975 }
1976
1977 Ok(())
1978 }
1979
1980 pub fn run_checkpoint(
1986 &mut self,
1987 emit_global_memory_events: bool,
1988 ) -> Result<(), ExecutionError> {
1989 self.executor_mode = ExecutorMode::Simple;
1990 self.print_report = true;
1991 while !self.execute_state(emit_global_memory_events)?.2 {}
1992 Ok(())
1993 }
1994
1995 pub fn run(&mut self) -> Result<(), ExecutionError> {
2001 self.executor_mode = ExecutorMode::Trace;
2002 self.print_report = true;
2003 while !self.execute()? {}
2004
2005 #[cfg(feature = "profiling")]
2006 if let Some((profiler, writer)) = self.profiler.take() {
2007 profiler.write(writer).expect("Failed to write profile to output file");
2008 }
2009
2010 Ok(())
2011 }
2012
2013 pub fn execute(&mut self) -> Result<bool, ExecutionError> {
2016 let program = self.program.clone();
2018
2019 let start_shard = self.state.current_shard;
2021
2022 if self.state.global_clk == 0 {
2024 self.initialize();
2025 }
2026
2027 let unconstrained_cycle_limit =
2028 std::env::var("UNCONSTRAINED_CYCLE_LIMIT").map(|v| v.parse::<u64>().unwrap()).ok();
2029
2030 let mut done = false;
2033 let mut current_shard = self.state.current_shard;
2034 let mut num_shards_executed = 0;
2035 loop {
2036 if self.execute_cycle()? {
2037 done = true;
2038 break;
2039 }
2040
2041 if let Some(unconstrained_cycle_limit) = unconstrained_cycle_limit {
2043 if self.unconstrained_state.total_unconstrained_cycles > unconstrained_cycle_limit {
2044 return Err(ExecutionError::UnconstrainedCycleLimitExceeded(
2045 unconstrained_cycle_limit,
2046 ));
2047 }
2048 }
2049
2050 if self.shard_batch_size > 0 && current_shard != self.state.current_shard {
2051 num_shards_executed += 1;
2052 current_shard = self.state.current_shard;
2053 if num_shards_executed == self.shard_batch_size {
2054 break;
2055 }
2056 }
2057 }
2058
2059 let public_values = self.record.public_values;
2061
2062 if done {
2063 self.postprocess();
2064
2065 self.bump_record();
2067
2068 if let Some(ref mut w) = self.io_options.stdout {
2070 if let Err(e) = w.flush() {
2071 tracing::error!("failed to flush stdout override: {e}");
2072 }
2073 }
2074
2075 if let Some(ref mut w) = self.io_options.stderr {
2076 if let Err(e) = w.flush() {
2077 tracing::error!("failed to flush stderr override: {e}");
2078 }
2079 }
2080 }
2081
2082 if !self.record.cpu_events.is_empty() {
2084 self.bump_record();
2085 }
2086
2087 let mut last_next_pc = 0;
2089 let mut last_exit_code = 0;
2090 for (i, record) in self.records.iter_mut().enumerate() {
2091 record.program = program.clone();
2092 record.public_values = public_values;
2093 record.public_values.committed_value_digest = public_values.committed_value_digest;
2094 record.public_values.deferred_proofs_digest = public_values.deferred_proofs_digest;
2095 record.public_values.execution_shard = start_shard + i as u32;
2096 if record.cpu_events.is_empty() {
2097 record.public_values.start_pc = last_next_pc;
2098 record.public_values.next_pc = last_next_pc;
2099 record.public_values.exit_code = last_exit_code;
2100 } else {
2101 record.public_values.start_pc = record.cpu_events[0].pc;
2102 record.public_values.next_pc = record.cpu_events.last().unwrap().next_pc;
2103 record.public_values.exit_code = record.cpu_events.last().unwrap().exit_code;
2104 last_next_pc = record.public_values.next_pc;
2105 last_exit_code = record.public_values.exit_code;
2106 }
2107 }
2108
2109 Ok(done)
2110 }
2111
    /// Runs end-of-program bookkeeping: flushes buffered stdio, warns about
    /// unread inputs/proofs, finalizes record-estimator memory counts, and —
    /// when requested — emits the global memory initialize/finalize events.
    fn postprocess(&mut self) {
        // Surface anything the guest wrote to fd 1/2 but never flushed.
        for (fd, buf) in &self.io_buf {
            if !buf.is_empty() {
                match fd {
                    1 => {
                        eprintln!("stdout: {buf}");
                    }
                    2 => {
                        eprintln!("stderr: {buf}");
                    }
                    _ => {}
                }
            }
        }

        if self.state.proof_stream_ptr != self.state.proof_stream.len() {
            tracing::warn!(
                "Not all proofs were read. Proving will fail during recursion. Did you pass too
        many proofs in or forget to call verify_sp1_proof?"
            );
        }

        if !self.state.input_stream.is_empty() {
            tracing::warn!("Not all input bytes were read.");
        }

        if let Some(estimator) = &mut self.record_estimator {
            // Register 0 plus every other register that was touched.
            let touched_reg_ct =
                1 + (1..32).filter(|&r| self.state.memory.registers.get(r).is_some()).count();
            let total_mem = touched_reg_ct + self.state.memory.page_table.exact_len();
            // Addresses in the program image are initialized up front, not by
            // execution, so they don't produce init events.
            estimator.memory_global_init_events = total_mem
                .checked_sub(self.record.program.memory_image.len())
                .expect("program memory image should be accounted for in memory exact len")
                as u64;
            estimator.memory_global_finalize_events = total_mem as u64;
        }

        if self.emit_global_memory_events &&
            (self.executor_mode == ExecutorMode::Trace ||
                self.executor_mode == ExecutorMode::Checkpoint)
        {
            let memory_finalize_events = &mut self.record.global_memory_finalize_events;
            memory_finalize_events.reserve_exact(self.state.memory.page_table.estimate_len() + 32);

            // Address 0 is always initialized to 0 and always finalized.
            let addr_0_record = self.state.memory.get(0);

            let addr_0_final_record = match addr_0_record {
                Some(record) => record,
                None => &MemoryRecord { value: 0, shard: 0, timestamp: 0 },
            };
            memory_finalize_events
                .push(MemoryInitializeFinalizeEvent::finalize_from_record(0, addr_0_final_record));

            let memory_initialize_events = &mut self.record.global_memory_initialize_events;
            memory_initialize_events
                .reserve_exact(self.state.memory.page_table.estimate_len() + 32);
            let addr_0_initialize_event = MemoryInitializeFinalizeEvent::initialize(0, 0);
            memory_initialize_events.push(addr_0_initialize_event);

            if self.print_report {
                self.report.touched_memory_addresses = 0;
            }
            // Registers live at addresses 1..32 in this memory model.
            for addr in 1..32 {
                let record = self.state.memory.registers.get(addr);
                if let Some(record) = record {
                    if self.print_report {
                        self.report.touched_memory_addresses += 1;
                    }
                    // Image-backed addresses were initialized at program load.
                    if !self.record.program.memory_image.contains_key(&addr) {
                        let initial_value =
                            self.state.uninitialized_memory.registers.get(addr).unwrap_or(&0);
                        memory_initialize_events
                            .push(MemoryInitializeFinalizeEvent::initialize(addr, *initial_value));
                    }

                    memory_finalize_events
                        .push(MemoryInitializeFinalizeEvent::finalize_from_record(addr, record));
                }
            }
            // All remaining touched memory lives in the page table.
            for addr in self.state.memory.page_table.keys() {
                if self.print_report {
                    self.report.touched_memory_addresses += 1;
                }

                if !self.record.program.memory_image.contains_key(&addr) {
                    let initial_value = self.state.uninitialized_memory.get(addr).unwrap_or(&0);
                    memory_initialize_events
                        .push(MemoryInitializeFinalizeEvent::initialize(addr, *initial_value));
                }

                let record = *self.state.memory.get(addr).unwrap();
                memory_finalize_events
                    .push(MemoryInitializeFinalizeEvent::finalize_from_record(addr, &record));
            }
        }
    }
2226
    /// Looks up the handler registered for the given syscall code, if any.
    fn get_syscall(&mut self, code: SyscallCode) -> Option<&Arc<dyn Syscall>> {
        self.syscall_map.get(&code)
    }
2230
    /// Estimates the per-AIR event counts for the current shard from the
    /// opcode tallies in `local_counts`, writing the results into
    /// `event_counts`. Used to decide when a shard must end so it still fits
    /// a maximal shape.
    fn estimate_riscv_event_counts(
        event_counts: &mut EnumMap<RiscvAirId, u64>,
        cpu_cycles: u64,
        local_counts: &LocalCounts,
    ) {
        let touched_addresses: u64 = local_counts.local_mem as u64;
        let syscalls_sent: u64 = local_counts.syscalls_sent as u64;
        let opcode_counts: &EnumMap<Opcode, u64> = &local_counts.event_counts;

        // One CPU row per executed instruction.
        event_counts[RiscvAirId::Cpu] = cpu_cycles;

        event_counts[RiscvAirId::AddSub] = opcode_counts[Opcode::ADD] + opcode_counts[Opcode::SUB];

        event_counts[RiscvAirId::Mul] = opcode_counts[Opcode::MUL] +
            opcode_counts[Opcode::MULH] +
            opcode_counts[Opcode::MULHU] +
            opcode_counts[Opcode::MULHSU];

        event_counts[RiscvAirId::Bitwise] =
            opcode_counts[Opcode::XOR] + opcode_counts[Opcode::OR] + opcode_counts[Opcode::AND];

        event_counts[RiscvAirId::ShiftLeft] = opcode_counts[Opcode::SLL];

        event_counts[RiscvAirId::ShiftRight] =
            opcode_counts[Opcode::SRL] + opcode_counts[Opcode::SRA];

        event_counts[RiscvAirId::DivRem] = opcode_counts[Opcode::DIV] +
            opcode_counts[Opcode::DIVU] +
            opcode_counts[Opcode::REM] +
            opcode_counts[Opcode::REMU];

        event_counts[RiscvAirId::Lt] = opcode_counts[Opcode::SLT] + opcode_counts[Opcode::SLTU];

        // Local-memory rows pack several entries per row.
        event_counts[RiscvAirId::MemoryLocal] =
            touched_addresses.div_ceil(NUM_LOCAL_MEMORY_ENTRIES_PER_ROW_EXEC as u64);

        event_counts[RiscvAirId::Branch] = opcode_counts[Opcode::BEQ] +
            opcode_counts[Opcode::BNE] +
            opcode_counts[Opcode::BLT] +
            opcode_counts[Opcode::BGE] +
            opcode_counts[Opcode::BLTU] +
            opcode_counts[Opcode::BGEU];

        event_counts[RiscvAirId::Jump] = opcode_counts[Opcode::JAL] + opcode_counts[Opcode::JALR];

        // UNIMP/EBREAK rows are handled by the AUIPC chip.
        event_counts[RiscvAirId::Auipc] = opcode_counts[Opcode::AUIPC] +
            opcode_counts[Opcode::UNIMP] +
            opcode_counts[Opcode::EBREAK];

        event_counts[RiscvAirId::MemoryInstrs] = opcode_counts[Opcode::LB] +
            opcode_counts[Opcode::LH] +
            opcode_counts[Opcode::LW] +
            opcode_counts[Opcode::LBU] +
            opcode_counts[Opcode::LHU] +
            opcode_counts[Opcode::SB] +
            opcode_counts[Opcode::SH] +
            opcode_counts[Opcode::SW];

        event_counts[RiscvAirId::SyscallInstrs] = opcode_counts[Opcode::ECALL];

        event_counts[RiscvAirId::SyscallCore] = syscalls_sent;

        // Each touched address contributes an init and a finalize interaction.
        event_counts[RiscvAirId::Global] =
            2 * touched_addresses + event_counts[RiscvAirId::SyscallInstrs];

        // DivRem internally emits Mul and Lt dependency events.
        event_counts[RiscvAirId::Mul] += event_counts[RiscvAirId::DivRem];
        event_counts[RiscvAirId::Lt] += event_counts[RiscvAirId::DivRem];

    }
2320
    /// Per-instruction logging hook: feeds the profiler (when the `profiling`
    /// feature is enabled) and emits a progress line every 10M cycles.
    #[inline]
    fn log(&mut self, _: &Instruction) {
        #[cfg(feature = "profiling")]
        if let Some((ref mut profiler, _)) = self.profiler {
            // Unconstrained cycles are not part of the proven trace.
            if !self.unconstrained {
                profiler.record(self.state.global_clk, self.state.pc as u64);
            }
        }

        if !self.unconstrained && self.state.global_clk.is_multiple_of(10_000_000) {
            tracing::info!("clk = {} pc = 0x{:x?}", self.state.global_clk, self.state.pc);
        }
    }
2334}
2335
/// Rounds `addr` down to the nearest word (4-byte) boundary.
#[must_use]
pub const fn align(addr: u32) -> u32 {
    // Clearing the two low bits is equivalent to `addr - addr % 4`.
    addr & !3
}
2341
2342#[cfg(test)]
2343mod tests {
2344
2345 use sp1_stark::SP1CoreOpts;
2346 use sp1_zkvm::syscalls::SHA_COMPRESS;
2347
2348 use crate::programs::tests::{
2349 fibonacci_program, panic_program, secp256r1_add_program, secp256r1_double_program,
2350 simple_memory_program, simple_program, ssz_withdrawals_program, u256xu2048_mul_program,
2351 };
2352
2353 use crate::{Register, SP1Context};
2354
2355 use super::{Executor, Instruction, Opcode, Program};
2356
    /// Helper for the compile-time `Send` assertion below; never called.
    fn _assert_send<T: Send>() {}
2358
    /// Compile-time check that `Executor` is `Send`; never called at runtime.
    fn _assert_runtime_is_send() {
        #[allow(clippy::used_underscore_items)]
        _assert_send::<Executor>();
    }
2364
2365 #[test]
2366 fn test_simple_program_run() {
2367 let program = simple_program();
2368 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2369 runtime.run().unwrap();
2370 assert_eq!(runtime.register(Register::X31), 42);
2371 }
2372
2373 #[test]
2374 fn test_fibonacci_program_run() {
2375 let program = fibonacci_program();
2376 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2377 runtime.run().unwrap();
2378 }
2379
2380 #[test]
2381 fn test_fibonacci_program_run_with_max_cycles() {
2382 let program = fibonacci_program();
2383 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2384 runtime.run().unwrap();
2385
2386 let max_cycles = runtime.state.global_clk;
2387
2388 let program = fibonacci_program();
2389 let context = SP1Context::builder().max_cycles(max_cycles).build();
2390 let mut runtime = Executor::with_context(program, SP1CoreOpts::default(), context);
2391 runtime.run().unwrap();
2392 }
2393
2394 #[test]
2395 fn test_secp256r1_add_program_run() {
2396 let program = secp256r1_add_program();
2397 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2398 runtime.run().unwrap();
2399 }
2400
2401 #[test]
2402 fn test_secp256r1_double_program_run() {
2403 let program = secp256r1_double_program();
2404 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2405 runtime.run().unwrap();
2406 }
2407
2408 #[test]
2409 fn test_u256xu2048_mul() {
2410 let program = u256xu2048_mul_program();
2411 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2412 runtime.run().unwrap();
2413 }
2414
2415 #[test]
2416 fn test_ssz_withdrawals_program_run() {
2417 let program = ssz_withdrawals_program();
2418 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2419 runtime.run().unwrap();
2420 }
2421
2422 #[test]
2423 #[should_panic]
2424 fn test_panic() {
2425 let program = panic_program();
2426 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2427 runtime.run().unwrap();
2428 }
2429
2430 #[test]
2431 fn test_add() {
2432 let instructions = vec![
2437 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2438 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2439 Instruction::new(Opcode::ADD, 31, 30, 29, false, false),
2440 ];
2441 let program = Program::new(instructions, 0, 0);
2442 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2443 runtime.run().unwrap();
2444 assert_eq!(runtime.register(Register::X31), 42);
2445 }
2446
2447 #[test]
2448 fn test_sub() {
2449 let instructions = vec![
2453 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2454 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2455 Instruction::new(Opcode::SUB, 31, 30, 29, false, false),
2456 ];
2457 let program = Program::new(instructions, 0, 0);
2458
2459 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2460 runtime.run().unwrap();
2461 assert_eq!(runtime.register(Register::X31), 32);
2462 }
2463
2464 #[test]
2465 fn test_xor() {
2466 let instructions = vec![
2470 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2471 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2472 Instruction::new(Opcode::XOR, 31, 30, 29, false, false),
2473 ];
2474 let program = Program::new(instructions, 0, 0);
2475
2476 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2477 runtime.run().unwrap();
2478 assert_eq!(runtime.register(Register::X31), 32);
2479 }
2480
2481 #[test]
2482 fn test_or() {
2483 let instructions = vec![
2487 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2488 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2489 Instruction::new(Opcode::OR, 31, 30, 29, false, false),
2490 ];
2491 let program = Program::new(instructions, 0, 0);
2492
2493 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2494
2495 runtime.run().unwrap();
2496 assert_eq!(runtime.register(Register::X31), 37);
2497 }
2498
2499 #[test]
2500 fn test_and() {
2501 let instructions = vec![
2505 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2506 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2507 Instruction::new(Opcode::AND, 31, 30, 29, false, false),
2508 ];
2509 let program = Program::new(instructions, 0, 0);
2510
2511 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2512 runtime.run().unwrap();
2513 assert_eq!(runtime.register(Register::X31), 5);
2514 }
2515
2516 #[test]
2517 fn test_sll() {
2518 let instructions = vec![
2522 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2523 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2524 Instruction::new(Opcode::SLL, 31, 30, 29, false, false),
2525 ];
2526 let program = Program::new(instructions, 0, 0);
2527
2528 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2529 runtime.run().unwrap();
2530 assert_eq!(runtime.register(Register::X31), 1184);
2531 }
2532
2533 #[test]
2534 fn test_srl() {
2535 let instructions = vec![
2539 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2540 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2541 Instruction::new(Opcode::SRL, 31, 30, 29, false, false),
2542 ];
2543 let program = Program::new(instructions, 0, 0);
2544
2545 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2546 runtime.run().unwrap();
2547 assert_eq!(runtime.register(Register::X31), 1);
2548 }
2549
2550 #[test]
2551 fn test_sra() {
2552 let instructions = vec![
2556 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2557 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2558 Instruction::new(Opcode::SRA, 31, 30, 29, false, false),
2559 ];
2560 let program = Program::new(instructions, 0, 0);
2561
2562 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2563 runtime.run().unwrap();
2564 assert_eq!(runtime.register(Register::X31), 1);
2565 }
2566
2567 #[test]
2568 fn test_slt() {
2569 let instructions = vec![
2573 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2574 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2575 Instruction::new(Opcode::SLT, 31, 30, 29, false, false),
2576 ];
2577 let program = Program::new(instructions, 0, 0);
2578
2579 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2580 runtime.run().unwrap();
2581 assert_eq!(runtime.register(Register::X31), 0);
2582 }
2583
2584 #[test]
2585 fn test_sltu() {
2586 let instructions = vec![
2590 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2591 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2592 Instruction::new(Opcode::SLTU, 31, 30, 29, false, false),
2593 ];
2594 let program = Program::new(instructions, 0, 0);
2595
2596 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2597 runtime.run().unwrap();
2598 assert_eq!(runtime.register(Register::X31), 0);
2599 }
2600
2601 #[test]
2602 fn test_addi() {
2603 let instructions = vec![
2607 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2608 Instruction::new(Opcode::ADD, 30, 29, 37, false, true),
2609 Instruction::new(Opcode::ADD, 31, 30, 42, false, true),
2610 ];
2611 let program = Program::new(instructions, 0, 0);
2612
2613 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2614 runtime.run().unwrap();
2615 assert_eq!(runtime.register(Register::X31), 84);
2616 }
2617
2618 #[test]
2619 fn test_addi_negative() {
2620 let instructions = vec![
2624 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2625 Instruction::new(Opcode::ADD, 30, 29, 0xFFFF_FFFF, false, true),
2626 Instruction::new(Opcode::ADD, 31, 30, 4, false, true),
2627 ];
2628 let program = Program::new(instructions, 0, 0);
2629 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2630 runtime.run().unwrap();
2631 assert_eq!(runtime.register(Register::X31), 5 - 1 + 4);
2632 }
2633
2634 #[test]
2635 fn test_xori() {
2636 let instructions = vec![
2640 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2641 Instruction::new(Opcode::XOR, 30, 29, 37, false, true),
2642 Instruction::new(Opcode::XOR, 31, 30, 42, false, true),
2643 ];
2644 let program = Program::new(instructions, 0, 0);
2645 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2646 runtime.run().unwrap();
2647 assert_eq!(runtime.register(Register::X31), 10);
2648 }
2649
2650 #[test]
2651 fn test_ori() {
2652 let instructions = vec![
2656 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2657 Instruction::new(Opcode::OR, 30, 29, 37, false, true),
2658 Instruction::new(Opcode::OR, 31, 30, 42, false, true),
2659 ];
2660 let program = Program::new(instructions, 0, 0);
2661 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2662 runtime.run().unwrap();
2663 assert_eq!(runtime.register(Register::X31), 47);
2664 }
2665
2666 #[test]
2667 fn test_andi() {
2668 let instructions = vec![
2672 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2673 Instruction::new(Opcode::AND, 30, 29, 37, false, true),
2674 Instruction::new(Opcode::AND, 31, 30, 42, false, true),
2675 ];
2676 let program = Program::new(instructions, 0, 0);
2677 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2678 runtime.run().unwrap();
2679 assert_eq!(runtime.register(Register::X31), 0);
2680 }
2681
2682 #[test]
2683 fn test_slli() {
2684 let instructions = vec![
2687 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2688 Instruction::new(Opcode::SLL, 31, 29, 4, false, true),
2689 ];
2690 let program = Program::new(instructions, 0, 0);
2691 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2692 runtime.run().unwrap();
2693 assert_eq!(runtime.register(Register::X31), 80);
2694 }
2695
2696 #[test]
2697 fn test_srli() {
2698 let instructions = vec![
2701 Instruction::new(Opcode::ADD, 29, 0, 42, false, true),
2702 Instruction::new(Opcode::SRL, 31, 29, 4, false, true),
2703 ];
2704 let program = Program::new(instructions, 0, 0);
2705 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2706 runtime.run().unwrap();
2707 assert_eq!(runtime.register(Register::X31), 2);
2708 }
2709
2710 #[test]
2711 fn test_srai() {
2712 let instructions = vec![
2715 Instruction::new(Opcode::ADD, 29, 0, 42, false, true),
2716 Instruction::new(Opcode::SRA, 31, 29, 4, false, true),
2717 ];
2718 let program = Program::new(instructions, 0, 0);
2719 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2720 runtime.run().unwrap();
2721 assert_eq!(runtime.register(Register::X31), 2);
2722 }
2723
2724 #[test]
2725 fn test_slti() {
2726 let instructions = vec![
2729 Instruction::new(Opcode::ADD, 29, 0, 42, false, true),
2730 Instruction::new(Opcode::SLT, 31, 29, 37, false, true),
2731 ];
2732 let program = Program::new(instructions, 0, 0);
2733 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2734 runtime.run().unwrap();
2735 assert_eq!(runtime.register(Register::X31), 0);
2736 }
2737
2738 #[test]
2739 fn test_sltiu() {
2740 let instructions = vec![
2743 Instruction::new(Opcode::ADD, 29, 0, 42, false, true),
2744 Instruction::new(Opcode::SLTU, 31, 29, 37, false, true),
2745 ];
2746 let program = Program::new(instructions, 0, 0);
2747 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2748 runtime.run().unwrap();
2749 assert_eq!(runtime.register(Register::X31), 0);
2750 }
2751
2752 #[test]
2753 fn test_jalr() {
2754 let instructions = vec![
2762 Instruction::new(Opcode::ADD, 11, 11, 100, false, true),
2763 Instruction::new(Opcode::JALR, 5, 11, 8, false, true),
2764 ];
2765 let program = Program::new(instructions, 0, 0);
2766 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2767 runtime.run().unwrap();
2768 assert_eq!(runtime.registers()[Register::X5 as usize], 8);
2769 assert_eq!(runtime.registers()[Register::X11 as usize], 100);
2770 assert_eq!(runtime.state.pc, 108);
2771 }
2772
2773 fn simple_op_code_test(opcode: Opcode, expected: u32, a: u32, b: u32) {
2774 let instructions = vec![
2775 Instruction::new(Opcode::ADD, 10, 0, a, false, true),
2776 Instruction::new(Opcode::ADD, 11, 0, b, false, true),
2777 Instruction::new(opcode, 12, 10, 11, false, false),
2778 ];
2779 let program = Program::new(instructions, 0, 0);
2780 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2781 runtime.run().unwrap();
2782 assert_eq!(runtime.registers()[Register::X12 as usize], expected);
2783 }
2784
    #[test]
    #[allow(clippy::unreadable_literal)]
    fn multiplication_tests() {
        // Argument order is (opcode, expected, a, b), i.e. expected == a <op> b.
        // MULHU: upper 32 bits of the unsigned x unsigned 64-bit product.
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x00000000, 0x00000000);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x00000001, 0x00000001);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x00000003, 0x00000007);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x00000000, 0xffff8000);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x80000000, 0x00000000);
        simple_op_code_test(Opcode::MULHU, 0x7fffc000, 0x80000000, 0xffff8000);
        simple_op_code_test(Opcode::MULHU, 0x0001fefe, 0xaaaaaaab, 0x0002fe7d);
        simple_op_code_test(Opcode::MULHU, 0x0001fefe, 0x0002fe7d, 0xaaaaaaab);
        simple_op_code_test(Opcode::MULHU, 0xfe010000, 0xff000000, 0xff000000);
        simple_op_code_test(Opcode::MULHU, 0xfffffffe, 0xffffffff, 0xffffffff);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0xffffffff, 0x00000001);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x00000001, 0xffffffff);

        // MULHSU: upper 32 bits of the signed (a) x unsigned (b) product.
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x00000000, 0x00000000);
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x00000001, 0x00000001);
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x00000003, 0x00000007);
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x00000000, 0xffff8000);
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x80000000, 0x00000000);
        simple_op_code_test(Opcode::MULHSU, 0x80004000, 0x80000000, 0xffff8000);
        simple_op_code_test(Opcode::MULHSU, 0xffff0081, 0xaaaaaaab, 0x0002fe7d);
        simple_op_code_test(Opcode::MULHSU, 0x0001fefe, 0x0002fe7d, 0xaaaaaaab);
        simple_op_code_test(Opcode::MULHSU, 0xff010000, 0xff000000, 0xff000000);
        simple_op_code_test(Opcode::MULHSU, 0xffffffff, 0xffffffff, 0xffffffff);
        simple_op_code_test(Opcode::MULHSU, 0xffffffff, 0xffffffff, 0x00000001);
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x00000001, 0xffffffff);

        // MULH: upper 32 bits of the signed x signed product.
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x00000000, 0x00000000);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x00000001, 0x00000001);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x00000003, 0x00000007);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x00000000, 0xffff8000);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x80000000, 0x00000000);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x80000000, 0x00000000);
        simple_op_code_test(Opcode::MULH, 0xffff0081, 0xaaaaaaab, 0x0002fe7d);
        simple_op_code_test(Opcode::MULH, 0xffff0081, 0x0002fe7d, 0xaaaaaaab);
        simple_op_code_test(Opcode::MULH, 0x00010000, 0xff000000, 0xff000000);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0xffffffff, 0xffffffff);
        simple_op_code_test(Opcode::MULH, 0xffffffff, 0xffffffff, 0x00000001);
        simple_op_code_test(Opcode::MULH, 0xffffffff, 0x00000001, 0xffffffff);

        // MUL: lower 32 bits of the product (same for signed and unsigned).
        simple_op_code_test(Opcode::MUL, 0x00001200, 0x00007e00, 0xb6db6db7);
        simple_op_code_test(Opcode::MUL, 0x00001240, 0x00007fc0, 0xb6db6db7);
        simple_op_code_test(Opcode::MUL, 0x00000000, 0x00000000, 0x00000000);
        simple_op_code_test(Opcode::MUL, 0x00000001, 0x00000001, 0x00000001);
        simple_op_code_test(Opcode::MUL, 0x00000015, 0x00000003, 0x00000007);
        simple_op_code_test(Opcode::MUL, 0x00000000, 0x00000000, 0xffff8000);
        simple_op_code_test(Opcode::MUL, 0x00000000, 0x80000000, 0x00000000);
        simple_op_code_test(Opcode::MUL, 0x00000000, 0x80000000, 0xffff8000);
        simple_op_code_test(Opcode::MUL, 0x0000ff7f, 0xaaaaaaab, 0x0002fe7d);
        simple_op_code_test(Opcode::MUL, 0x0000ff7f, 0x0002fe7d, 0xaaaaaaab);
        simple_op_code_test(Opcode::MUL, 0x00000000, 0xff000000, 0xff000000);
        simple_op_code_test(Opcode::MUL, 0x00000001, 0xffffffff, 0xffffffff);
        simple_op_code_test(Opcode::MUL, 0xffffffff, 0xffffffff, 0x00000001);
        simple_op_code_test(Opcode::MUL, 0xffffffff, 0x00000001, 0xffffffff);
    }
2842
2843 fn neg(a: u32) -> u32 {
2844 u32::MAX - a + 1
2845 }
2846
    #[test]
    fn division_tests() {
        // Argument order is (opcode, expected, a, b), i.e. expected == a / b.
        // DIVU: unsigned division.
        simple_op_code_test(Opcode::DIVU, 3, 20, 6);
        simple_op_code_test(Opcode::DIVU, 715_827_879, u32::MAX - 20 + 1, 6);
        simple_op_code_test(Opcode::DIVU, 0, 20, u32::MAX - 6 + 1);
        simple_op_code_test(Opcode::DIVU, 0, u32::MAX - 20 + 1, u32::MAX - 6 + 1);

        simple_op_code_test(Opcode::DIVU, 1 << 31, 1 << 31, 1);
        simple_op_code_test(Opcode::DIVU, 0, 1 << 31, u32::MAX - 1 + 1);

        // Unsigned division by zero yields all ones (u32::MAX), per RISC-V.
        simple_op_code_test(Opcode::DIVU, u32::MAX, 1 << 31, 0);
        simple_op_code_test(Opcode::DIVU, u32::MAX, 1, 0);
        simple_op_code_test(Opcode::DIVU, u32::MAX, 0, 0);

        // DIV: signed division; negative operands built via two's-complement `neg`.
        simple_op_code_test(Opcode::DIV, 3, 18, 6);
        simple_op_code_test(Opcode::DIV, neg(6), neg(24), 4);
        simple_op_code_test(Opcode::DIV, neg(2), 16, neg(8));
        // Signed division by zero yields -1, per RISC-V.
        simple_op_code_test(Opcode::DIV, neg(1), 0, 0);

        // Signed overflow case: i32::MIN / -1 wraps to i32::MIN, remainder 0.
        simple_op_code_test(Opcode::DIV, 1 << 31, 1 << 31, neg(1));
        simple_op_code_test(Opcode::REM, 0, 1 << 31, neg(1));
    }
2870
    #[test]
    fn remainder_tests() {
        // Argument order is (opcode, expected, a, b), i.e. expected == a % b.
        // REM: signed remainder; the result takes the sign of the dividend.
        simple_op_code_test(Opcode::REM, 7, 16, 9);
        simple_op_code_test(Opcode::REM, neg(4), neg(22), 6);
        simple_op_code_test(Opcode::REM, 1, 25, neg(3));
        simple_op_code_test(Opcode::REM, neg(2), neg(22), neg(4));
        simple_op_code_test(Opcode::REM, 0, 873, 1);
        simple_op_code_test(Opcode::REM, 0, 873, neg(1));
        // Remainder by zero yields the dividend, per RISC-V.
        simple_op_code_test(Opcode::REM, 5, 5, 0);
        simple_op_code_test(Opcode::REM, neg(5), neg(5), 0);
        simple_op_code_test(Opcode::REM, 0, 0, 0);

        // REMU: unsigned remainder.
        simple_op_code_test(Opcode::REMU, 4, 18, 7);
        simple_op_code_test(Opcode::REMU, 6, neg(20), 11);
        simple_op_code_test(Opcode::REMU, 23, 23, neg(6));
        simple_op_code_test(Opcode::REMU, neg(21), neg(21), neg(11));
        // Remainder by zero yields the dividend, per RISC-V.
        simple_op_code_test(Opcode::REMU, 5, 5, 0);
        simple_op_code_test(Opcode::REMU, neg(1), neg(1), 0);
        simple_op_code_test(Opcode::REMU, 0, 0, 0);
    }
2891
    #[test]
    #[allow(clippy::unreadable_literal)]
    fn shift_tests() {
        // Argument order is (opcode, expected, a, shamt), i.e. expected == a <shift> shamt.
        // Only the low 5 bits of the shift amount are used, which is why e.g.
        // 0xffffffe1 behaves like a shift by 1 below.
        // SLL: logical left shift.
        simple_op_code_test(Opcode::SLL, 0x00000001, 0x00000001, 0);
        simple_op_code_test(Opcode::SLL, 0x00000002, 0x00000001, 1);
        simple_op_code_test(Opcode::SLL, 0x00000080, 0x00000001, 7);
        simple_op_code_test(Opcode::SLL, 0x00004000, 0x00000001, 14);
        simple_op_code_test(Opcode::SLL, 0x80000000, 0x00000001, 31);
        simple_op_code_test(Opcode::SLL, 0xffffffff, 0xffffffff, 0);
        simple_op_code_test(Opcode::SLL, 0xfffffffe, 0xffffffff, 1);
        simple_op_code_test(Opcode::SLL, 0xffffff80, 0xffffffff, 7);
        simple_op_code_test(Opcode::SLL, 0xffffc000, 0xffffffff, 14);
        simple_op_code_test(Opcode::SLL, 0x80000000, 0xffffffff, 31);
        simple_op_code_test(Opcode::SLL, 0x21212121, 0x21212121, 0);
        simple_op_code_test(Opcode::SLL, 0x42424242, 0x21212121, 1);
        simple_op_code_test(Opcode::SLL, 0x90909080, 0x21212121, 7);
        simple_op_code_test(Opcode::SLL, 0x48484000, 0x21212121, 14);
        simple_op_code_test(Opcode::SLL, 0x80000000, 0x21212121, 31);
        simple_op_code_test(Opcode::SLL, 0x21212121, 0x21212121, 0xffffffe0);
        simple_op_code_test(Opcode::SLL, 0x42424242, 0x21212121, 0xffffffe1);
        simple_op_code_test(Opcode::SLL, 0x90909080, 0x21212121, 0xffffffe7);
        simple_op_code_test(Opcode::SLL, 0x48484000, 0x21212121, 0xffffffee);
        simple_op_code_test(Opcode::SLL, 0x00000000, 0x21212120, 0xffffffff);

        // SRL: logical right shift (zero fill).
        simple_op_code_test(Opcode::SRL, 0xffff8000, 0xffff8000, 0);
        simple_op_code_test(Opcode::SRL, 0x7fffc000, 0xffff8000, 1);
        simple_op_code_test(Opcode::SRL, 0x01ffff00, 0xffff8000, 7);
        simple_op_code_test(Opcode::SRL, 0x0003fffe, 0xffff8000, 14);
        simple_op_code_test(Opcode::SRL, 0x0001ffff, 0xffff8001, 15);
        simple_op_code_test(Opcode::SRL, 0xffffffff, 0xffffffff, 0);
        simple_op_code_test(Opcode::SRL, 0x7fffffff, 0xffffffff, 1);
        simple_op_code_test(Opcode::SRL, 0x01ffffff, 0xffffffff, 7);
        simple_op_code_test(Opcode::SRL, 0x0003ffff, 0xffffffff, 14);
        simple_op_code_test(Opcode::SRL, 0x00000001, 0xffffffff, 31);
        simple_op_code_test(Opcode::SRL, 0x21212121, 0x21212121, 0);
        simple_op_code_test(Opcode::SRL, 0x10909090, 0x21212121, 1);
        simple_op_code_test(Opcode::SRL, 0x00424242, 0x21212121, 7);
        simple_op_code_test(Opcode::SRL, 0x00008484, 0x21212121, 14);
        simple_op_code_test(Opcode::SRL, 0x00000000, 0x21212121, 31);
        simple_op_code_test(Opcode::SRL, 0x21212121, 0x21212121, 0xffffffe0);
        simple_op_code_test(Opcode::SRL, 0x10909090, 0x21212121, 0xffffffe1);
        simple_op_code_test(Opcode::SRL, 0x00424242, 0x21212121, 0xffffffe7);
        simple_op_code_test(Opcode::SRL, 0x00008484, 0x21212121, 0xffffffee);
        simple_op_code_test(Opcode::SRL, 0x00000000, 0x21212121, 0xffffffff);

        // SRA: arithmetic right shift (sign-bit fill).
        simple_op_code_test(Opcode::SRA, 0x00000000, 0x00000000, 0);
        simple_op_code_test(Opcode::SRA, 0xc0000000, 0x80000000, 1);
        simple_op_code_test(Opcode::SRA, 0xff000000, 0x80000000, 7);
        simple_op_code_test(Opcode::SRA, 0xfffe0000, 0x80000000, 14);
        simple_op_code_test(Opcode::SRA, 0xffffffff, 0x80000001, 31);
        simple_op_code_test(Opcode::SRA, 0x7fffffff, 0x7fffffff, 0);
        simple_op_code_test(Opcode::SRA, 0x3fffffff, 0x7fffffff, 1);
        simple_op_code_test(Opcode::SRA, 0x00ffffff, 0x7fffffff, 7);
        simple_op_code_test(Opcode::SRA, 0x0001ffff, 0x7fffffff, 14);
        simple_op_code_test(Opcode::SRA, 0x00000000, 0x7fffffff, 31);
        simple_op_code_test(Opcode::SRA, 0x81818181, 0x81818181, 0);
        simple_op_code_test(Opcode::SRA, 0xc0c0c0c0, 0x81818181, 1);
        simple_op_code_test(Opcode::SRA, 0xff030303, 0x81818181, 7);
        simple_op_code_test(Opcode::SRA, 0xfffe0606, 0x81818181, 14);
        simple_op_code_test(Opcode::SRA, 0xffffffff, 0x81818181, 31);
    }
2953
2954 #[test]
2955 #[allow(clippy::unreadable_literal)]
2956 fn test_simple_memory_program_run() {
2957 let program = simple_memory_program();
2958 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2959 runtime.run().unwrap();
2960
2961 assert_eq!(runtime.register(Register::X28), 0x12348765);
2963
2964 assert_eq!(runtime.register(Register::X27), 0x65);
2966 assert_eq!(runtime.register(Register::X26), 0x87);
2967 assert_eq!(runtime.register(Register::X25), 0x34);
2968 assert_eq!(runtime.register(Register::X24), 0x12);
2969
2970 assert_eq!(runtime.register(Register::X23), 0x65);
2972 assert_eq!(runtime.register(Register::X22), 0xffffff87);
2973
2974 assert_eq!(runtime.register(Register::X21), 0x8765);
2976 assert_eq!(runtime.register(Register::X20), 0x1234);
2977
2978 assert_eq!(runtime.register(Register::X19), 0xffff8765);
2980 assert_eq!(runtime.register(Register::X18), 0x1234);
2981
2982 assert_eq!(runtime.register(Register::X16), 0x12348725);
2984 assert_eq!(runtime.register(Register::X15), 0x12342525);
2985 assert_eq!(runtime.register(Register::X14), 0x12252525);
2986 assert_eq!(runtime.register(Register::X13), 0x25252525);
2987
2988 assert_eq!(runtime.register(Register::X12), 0x12346525);
2990 assert_eq!(runtime.register(Register::X11), 0x65256525);
2991 }
2992
2993 #[test]
2994 #[should_panic]
2995 fn test_invalid_address_access_sw() {
2996 let instructions = vec![
2997 Instruction::new(Opcode::ADD, 29, 0, 20, false, true),
2998 Instruction::new(Opcode::SW, 0, 29, 0, false, true),
2999 ];
3000
3001 let program = Program::new(instructions, 0, 0);
3002 let mut runtime = Executor::new(program, SP1CoreOpts::default());
3003 runtime.run().unwrap();
3004 }
3005
3006 #[test]
3007 #[should_panic]
3008 fn test_invalid_address_access_lw() {
3009 let instructions = vec![
3010 Instruction::new(Opcode::ADD, 29, 0, 20, false, true),
3011 Instruction::new(Opcode::LW, 29, 29, 0, false, true),
3012 ];
3013
3014 let program = Program::new(instructions, 0, 0);
3015 let mut runtime = Executor::new(program, SP1CoreOpts::default());
3016 runtime.run().unwrap();
3017 }
3018
3019 #[test]
3020 #[should_panic]
3021 fn test_invalid_address_syscall() {
3022 let instructions = vec![
3023 Instruction::new(Opcode::ADD, 5, 0, SHA_COMPRESS, false, true),
3024 Instruction::new(Opcode::ADD, 10, 0, 10, false, true),
3025 Instruction::new(Opcode::ADD, 11, 10, 20, false, true),
3026 Instruction::new(Opcode::ECALL, 5, 10, 11, false, false),
3027 ];
3028
3029 let program = Program::new(instructions, 0, 0);
3030 let mut runtime = Executor::new(program, SP1CoreOpts::default());
3031 runtime.run().unwrap();
3032 }
3033}