1#[cfg(feature = "profiling")]
2use std::{fs::File, io::BufWriter};
3use std::{str::FromStr, sync::Arc};
4
5use crate::estimator::RecordEstimator;
6#[cfg(feature = "profiling")]
7use crate::profiler::Profiler;
8
9use clap::ValueEnum;
10use enum_map::EnumMap;
11use hashbrown::HashMap;
12use serde::{Deserialize, Serialize};
13use sp1_primitives::consts::BABYBEAR_PRIME;
14use sp1_stark::{air::PublicValues, SP1CoreOpts};
15use strum::IntoEnumIterator;
16use thiserror::Error;
17
18use crate::{
19 context::{IoOptions, SP1Context},
20 dependencies::{
21 emit_auipc_dependency, emit_branch_dependencies, emit_divrem_dependencies,
22 emit_jump_dependencies, emit_memory_dependencies,
23 },
24 estimate_riscv_lde_size,
25 events::{
26 AUIPCEvent, AluEvent, BranchEvent, CpuEvent, JumpEvent, MemInstrEvent,
27 MemoryAccessPosition, MemoryInitializeFinalizeEvent, MemoryLocalEvent, MemoryReadRecord,
28 MemoryRecord, MemoryRecordEnum, MemoryWriteRecord, SyscallEvent,
29 NUM_LOCAL_MEMORY_ENTRIES_PER_ROW_EXEC,
30 },
31 hook::{HookEnv, HookRegistry},
32 memory::{Entry, Memory},
33 pad_rv32im_event_counts,
34 record::{ExecutionRecord, MemoryAccessRecord},
35 report::ExecutionReport,
36 state::{ExecutionState, ForkState},
37 subproof::SubproofVerifier,
38 syscalls::{default_syscall_map, Syscall, SyscallCode, SyscallContext},
39 CoreAirId, Instruction, MaximalShapes, Opcode, Program, Register, RiscvAirId,
40};
41
/// The number of bytes the program counter advances past a non-branching instruction.
pub const DEFAULT_PC_INC: u32 = 4;
/// Sentinel program-counter value used when a real PC is not applicable.
/// NOTE(review): presumably chosen as 1 (not word-aligned) so it cannot collide with a real
/// PC — confirm against where it is consumed.
pub const UNUSED_PC: u32 = 1;

/// The maximum supported program size.
/// NOTE(review): the unit (instructions vs. bytes) is not visible in this chunk — confirm.
pub const MAX_PROGRAM_SIZE: usize = 1 << 22;
51
/// Whether deferred proofs are verified during execution.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DeferredProofVerification {
    /// Verify deferred proofs while executing.
    Enabled,
    /// Skip verification of deferred proofs.
    Disabled,
}
60
61impl From<bool> for DeferredProofVerification {
62 fn from(value: bool) -> Self {
63 if value {
64 DeferredProofVerification::Enabled
65 } else {
66 DeferredProofVerification::Disabled
67 }
68 }
69}
70
/// An executor for the SP1 RISC-V zkVM.
///
/// Runs a [`Program`] while recording whatever the configured [`ExecutorMode`] requires:
/// nothing extra, memory checkpoints, or the full event trace needed for proving.
pub struct Executor<'a> {
    /// The program being executed.
    pub program: Arc<Program>,

    /// The mutable machine state (pc, clk, memory, current shard, ...).
    pub state: ExecutionState,

    /// First-observed value of each memory cell, recorded in checkpoint/unconstrained mode so
    /// the state can be restored or replayed later. `None` means the cell was untouched.
    pub memory_checkpoint: Memory<Option<MemoryRecord>>,

    /// For each lazily-initialized cell, whether its uninitialized-memory value was nonzero
    /// when first touched.
    pub uninitialized_memory_checkpoint: Memory<bool>,

    /// Aggregate statistics about the execution (e.g. opcode counts).
    pub report: ExecutionReport,

    /// How much of the execution is recorded (see [`ExecutorMode`]).
    pub executor_mode: ExecutorMode,

    /// The operand (a/b/c) and memory access records of the current instruction.
    pub memory_accesses: MemoryAccessRecord,

    /// Whether the executor is currently in unconstrained (unproven) mode.
    pub unconstrained: bool,

    /// Whether to collect the execution report.
    pub print_report: bool,

    /// Optional estimator for predicting record/shard sizes.
    pub record_estimator: Option<Box<RecordEstimator>>,

    /// Whether to emit global memory initialize/finalize events.
    pub emit_global_memory_events: bool,

    /// Shard size limit. NOTE(review): initialized as `opts.shard_size * 4`, presumably in
    /// clock units rather than instructions — confirm against the sharding logic.
    pub shard_size: u32,

    /// The number of shards per checkpoint batch.
    pub shard_batch_size: u32,

    /// The maximum number of extra cycles any registered syscall may consume.
    pub max_syscall_cycles: u32,

    /// Registered syscall implementations, keyed by syscall code.
    pub syscall_map: HashMap<SyscallCode, Arc<dyn Syscall>>,

    /// Core executor/prover options.
    pub opts: SP1CoreOpts,

    /// Optional global cycle limit; exceeding it is an [`ExecutionError::ExceededCycleLimit`].
    pub max_cycles: Option<u64>,

    /// The execution record for the shard currently being built.
    pub record: Box<ExecutionRecord>,

    /// Completed shard records.
    pub records: Vec<Box<ExecutionRecord>>,

    /// Local memory events for the current shard, keyed by address.
    pub local_memory_access: HashMap<u32, MemoryLocalEvent>,

    /// Open cycle-tracker spans, keyed by name.
    /// NOTE(review): the meaning of the `(u64, u32)` payload is not visible here — confirm.
    pub cycle_tracker: HashMap<String, (u64, u32)>,

    /// Buffered guest output, keyed by file descriptor.
    /// NOTE(review): presumably partial lines from write syscalls — confirm against callers.
    pub io_buf: HashMap<u32, String>,

    /// The profiler and the writer for its trace output. Set up only when the `profiling`
    /// feature is enabled and the `TRACE_FILE` env var is present.
    #[cfg(feature = "profiling")]
    pub profiler: Option<(Profiler, BufWriter<File>)>,

    /// Saved state used to roll back after leaving unconstrained mode.
    pub unconstrained_state: Box<ForkState>,

    /// Per-shard event/memory counters used for shard-fullness decisions.
    pub local_counts: LocalCounts,

    /// Verifier used for deferred subproofs, if any.
    pub subproof_verifier: Option<&'a dyn SubproofVerifier>,

    /// Registry of host hooks invokable from the guest.
    pub hook_registry: HookRegistry<'a>,

    /// Maximal shapes used for shape checking, if enabled.
    pub maximal_shapes: Option<MaximalShapes>,

    /// Per-AIR trace costs, loaded from `rv32im_costs.json` in `with_context`.
    pub costs: HashMap<RiscvAirId, u64>,

    /// Whether deferred proofs are verified during execution.
    pub deferred_proof_verification: DeferredProofVerification,

    /// How often (in cycles) shapes/LDE size are checked.
    pub shape_check_frequency: u64,

    /// Whether to estimate the LDE size and enforce `lde_size_threshold`.
    pub lde_size_check: bool,

    /// The maximum allowed estimated LDE size when `lde_size_check` is set.
    pub lde_size_threshold: u64,

    /// Options controlling guest I/O streams.
    pub io_options: IoOptions<'a>,

    // Estimated event counts per AIR for the current shard (internal bookkeeping).
    event_counts: EnumMap<RiscvAirId, u64>,
}
193
/// What the executor records while running.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, ValueEnum)]
pub enum ExecutorMode {
    /// Execute without recording checkpoints or event traces.
    Simple,
    /// Record memory checkpoints so execution can be resumed shard by shard.
    Checkpoint,
    /// Record the full event trace needed for proving.
    Trace,
    /// Collect execution shapes.
    /// NOTE(review): exact semantics are not visible in this chunk — confirm.
    ShapeCollection,
}
206
/// Per-shard counters used to track how full the current shard is.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct LocalCounts {
    /// The number of events per opcode in the current shard.
    pub event_counts: Box<EnumMap<Opcode, u64>>,
    /// The number of syscalls sent in the current shard.
    pub syscalls_sent: usize,
    /// The number of local memory accesses counted in the current shard (see `mr`/`mw`).
    pub local_mem: usize,
}
217
/// Errors that can occur while executing a program.
#[derive(Error, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum ExecutionError {
    /// The program halted with a non-zero exit code.
    #[error("execution failed with exit code {0}")]
    HaltWithNonZeroExitCode(u32),

    /// An instruction performed an invalid memory access (misaligned, reserved, or
    /// out-of-range address).
    #[error("invalid memory access for opcode {0} and address {1}")]
    InvalidMemoryAccess(Opcode, u32),

    /// The program invoked a syscall the executor does not implement.
    #[error("unimplemented syscall {0}")]
    UnsupportedSyscall(u32),

    /// A breakpoint was hit.
    #[error("breakpoint encountered")]
    Breakpoint(),

    /// Execution exceeded the configured cycle limit.
    #[error("exceeded cycle limit of {0}")]
    ExceededCycleLimit(u64),

    /// A syscall was used in a way that is not allowed in unconstrained mode.
    #[error("syscall called in unconstrained mode")]
    InvalidSyscallUsage(u64),

    /// An `UNIMP` (unimplemented) opcode was executed.
    #[error("got unimplemented as opcode")]
    Unimplemented(),

    /// The program ended while still in unconstrained mode.
    #[error("program ended in unconstrained mode")]
    EndInUnconstrained(),

    /// The unconstrained block ran for more cycles than allowed.
    #[error("unconstrained cycle limit exceeded")]
    UnconstrainedCycleLimitExceeded(u64),
}
257
258impl<'a> Executor<'a> {
    /// Create a new [`Executor`] for `program` with the default [`SP1Context`].
    #[must_use]
    pub fn new(program: Program, opts: SP1CoreOpts) -> Self {
        Self::with_context(program, opts, SP1Context::default())
    }
264
265 #[inline]
276 #[allow(unused_variables)]
277 pub fn maybe_setup_profiler(&mut self, elf_bytes: &[u8]) {
278 #[cfg(feature = "profiling")]
279 {
280 let trace_buf = std::env::var("TRACE_FILE").ok().map(|file| {
281 let file = File::create(file).unwrap();
282 BufWriter::new(file)
283 });
284
285 if let Some(trace_buf) = trace_buf {
286 eprintln!("Profiling enabled");
287
288 let sample_rate = std::env::var("TRACE_SAMPLE_RATE")
289 .ok()
290 .and_then(|rate| {
291 eprintln!("Profiling sample rate: {rate}");
292 rate.parse::<u32>().ok()
293 })
294 .unwrap_or(1);
295
296 self.profiler = Some((
297 Profiler::new(elf_bytes, sample_rate as u64)
298 .expect("Failed to create profiler"),
299 trace_buf,
300 ));
301 }
302 }
303 }
304
305 #[must_use]
307 pub fn with_context(program: Program, opts: SP1CoreOpts, context: SP1Context<'a>) -> Self {
308 let program = Arc::new(program);
310
311 let record = ExecutionRecord::new(program.clone());
313
314 let syscall_map = default_syscall_map();
316 let max_syscall_cycles =
317 syscall_map.values().map(|syscall| syscall.num_extra_cycles()).max().unwrap_or(0);
318
319 let hook_registry = context.hook_registry.unwrap_or_default();
320
321 let costs: HashMap<String, usize> =
322 serde_json::from_str(include_str!("./artifacts/rv32im_costs.json")).unwrap();
323 let costs: HashMap<RiscvAirId, usize> =
324 costs.into_iter().map(|(k, v)| (RiscvAirId::from_str(&k).unwrap(), v)).collect();
325
326 Self {
327 record: Box::new(record),
328 records: vec![],
329 state: ExecutionState::new(program.pc_start),
330 program,
331 memory_accesses: MemoryAccessRecord::default(),
332 shard_size: (opts.shard_size as u32) * 4,
333 shard_batch_size: opts.shard_batch_size as u32,
334 cycle_tracker: HashMap::new(),
335 io_buf: HashMap::new(),
336 #[cfg(feature = "profiling")]
337 profiler: None,
338 unconstrained: false,
339 unconstrained_state: Box::new(ForkState::default()),
340 syscall_map,
341 executor_mode: ExecutorMode::Trace,
342 emit_global_memory_events: true,
343 max_syscall_cycles,
344 report: ExecutionReport::default(),
345 local_counts: LocalCounts::default(),
346 print_report: false,
347 record_estimator: None,
348 subproof_verifier: context.subproof_verifier,
349 hook_registry,
350 opts,
351 max_cycles: context.max_cycles,
352 deferred_proof_verification: context.deferred_proof_verification.into(),
353 memory_checkpoint: Memory::default(),
354 uninitialized_memory_checkpoint: Memory::default(),
355 local_memory_access: HashMap::new(),
356 maximal_shapes: None,
357 costs: costs.into_iter().map(|(k, v)| (k, v as u64)).collect(),
358 shape_check_frequency: 16,
359 lde_size_check: false,
360 lde_size_threshold: 0,
361 event_counts: EnumMap::default(),
362 io_options: context.io_options,
363 }
364 }
365
366 pub fn hook(&self, fd: u32, buf: &[u8]) -> eyre::Result<Vec<Vec<u8>>> {
373 Ok(self
374 .hook_registry
375 .get(fd)
376 .ok_or(eyre::eyre!("no hook found for file descriptor {}", fd))?
377 .invoke_hook(self.hook_env(), buf))
378 }
379
    /// Prepare a [`HookEnv`] that gives a hook read-only access to this runtime.
    #[must_use]
    pub fn hook_env<'b>(&'b self) -> HookEnv<'b, 'a> {
        HookEnv { runtime: self }
    }
385
    /// Recreate an executor from a checkpointed [`ExecutionState`].
    ///
    /// Deferred proof verification is disabled on the recovered executor.
    /// NOTE(review): presumably because verification already happened (or is handled
    /// elsewhere) on the first pass — confirm.
    #[must_use]
    pub fn recover(program: Program, state: ExecutionState, opts: SP1CoreOpts) -> Self {
        let mut runtime = Self::new(program, opts);
        runtime.state = state;
        runtime.deferred_proof_verification = DeferredProofVerification::Disabled;
        runtime
    }
396
397 #[allow(clippy::single_match_else)]
399 #[must_use]
400 pub fn registers(&mut self) -> [u32; 32] {
401 let mut registers = [0; 32];
402 for i in 0..32 {
403 let record = self.state.memory.registers.get(i);
404
405 if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
409 match record {
410 Some(record) => {
411 self.memory_checkpoint.registers.entry(i).or_insert_with(|| Some(*record));
412 }
413 None => {
414 self.memory_checkpoint.registers.entry(i).or_insert(None);
415 }
416 }
417 }
418
419 registers[i as usize] = match record {
420 Some(record) => record.value,
421 None => 0,
422 };
423 }
424 registers
425 }
426
427 #[must_use]
429 pub fn register(&mut self, register: Register) -> u32 {
430 let addr = register as u32;
431 let record = self.state.memory.registers.get(addr);
432
433 if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
434 match record {
435 Some(record) => {
436 self.memory_checkpoint.registers.entry(addr).or_insert_with(|| Some(*record));
437 }
438 None => {
439 self.memory_checkpoint.registers.entry(addr).or_insert(None);
440 }
441 }
442 }
443 match record {
444 Some(record) => record.value,
445 None => 0,
446 }
447 }
448
449 #[must_use]
453 pub fn word(&mut self, addr: u32) -> u32 {
454 #[allow(clippy::single_match_else)]
455 let record = self.state.memory.page_table.get(addr);
456
457 if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
458 match record {
459 Some(record) => {
460 self.memory_checkpoint.page_table.entry(addr).or_insert_with(|| Some(*record));
461 }
462 None => {
463 self.memory_checkpoint.page_table.entry(addr).or_insert(None);
464 }
465 }
466 }
467
468 match record {
469 Some(record) => record.value,
470 None => 0,
471 }
472 }
473
474 #[must_use]
478 pub fn byte(&mut self, addr: u32) -> u8 {
479 let word = self.word(addr - addr % 4);
480 (word >> ((addr % 4) * 8)) as u8
481 }
482
    /// The memory-access timestamp for `position` within the current clock cycle.
    #[must_use]
    pub const fn timestamp(&self, position: &MemoryAccessPosition) -> u32 {
        self.state.clk + *position as u32
    }
488
    /// The current shard number.
    #[must_use]
    #[inline]
    pub fn shard(&self) -> u32 {
        self.state.current_shard
    }
495
    /// Read a word from memory, stamping its record with `shard`/`timestamp` and returning a
    /// [`MemoryReadRecord`] containing both the new and previous stamps.
    ///
    /// When `local_memory_access` is `Some` (precompile context), local memory events are
    /// accumulated there instead of in `self.local_memory_access`.
    ///
    /// # Panics
    ///
    /// Panics if `addr` is misaligned, within the register range, or >= `BABYBEAR_PRIME`.
    pub fn mr(
        &mut self,
        addr: u32,
        shard: u32,
        timestamp: u32,
        local_memory_access: Option<&mut HashMap<u32, MemoryLocalEvent>>,
    ) -> MemoryReadRecord {
        // Validate the address: word-aligned, above the register window, below the field prime.
        if addr % 4 != 0 || addr <= Register::X31 as u32 || addr >= BABYBEAR_PRIME {
            panic!("Invalid memory access: addr={addr}");
        }

        // In checkpoint/unconstrained mode, remember the first observed value of this cell.
        let entry = self.state.memory.page_table.entry(addr);
        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.page_table.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.page_table.entry(addr).or_insert(None);
                }
            }
        }

        // In unconstrained mode, remember the pre-access value so it can be rolled back.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }

        // Materialize the record, lazily initializing from uninitialized memory (or 0).
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.page_table.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .page_table
                    .entry(addr)
                    .or_insert_with(|| *value != 0);
                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };

        // First touch in this shard (or any precompile access) counts as a local memory access.
        if !self.unconstrained && (record.shard != shard || local_memory_access.is_some()) {
            self.local_counts.local_mem += 1;
        }

        // Feed the record estimator, separating precompile-touched addresses from CPU ones.
        if !self.unconstrained {
            if let Some(estimator) = &mut self.record_estimator {
                if record.shard != shard {
                    estimator.current_local_mem += 1;
                }
                let current_touched_compressed_addresses = if local_memory_access.is_some() {
                    &mut estimator.current_precompile_touched_compressed_addresses
                } else {
                    &mut estimator.current_touched_compressed_addresses
                };
                current_touched_compressed_addresses.insert(addr >> 2);
            }
        }

        // Stamp the record with the new shard/timestamp, keeping the previous stamp.
        let prev_record = *record;
        record.shard = shard;
        record.timestamp = timestamp;

        // In trace mode, update (or create) the local memory event for this address.
        if !self.unconstrained && self.executor_mode == ExecutorMode::Trace {
            let local_memory_access = if let Some(local_memory_access) = local_memory_access {
                local_memory_access
            } else {
                &mut self.local_memory_access
            };

            local_memory_access
                .entry(addr)
                .and_modify(|e| {
                    e.final_mem_access = *record;
                })
                .or_insert(MemoryLocalEvent {
                    addr,
                    initial_mem_access: prev_record,
                    final_mem_access: *record,
                });
        }

        MemoryReadRecord::new(
            record.value,
            record.shard,
            record.timestamp,
            prev_record.shard,
            prev_record.timestamp,
        )
    }
604
    /// Read a register, stamping its record with `shard`/`timestamp` and returning the value.
    ///
    /// Unlike [`Executor::rr_traced`], this does not produce local memory events.
    pub fn rr(&mut self, register: Register, shard: u32, timestamp: u32) -> u32 {
        let addr = register as u32;
        // In checkpoint/unconstrained mode, remember the first observed value of the register.
        let entry = self.state.memory.registers.entry(addr);
        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.registers.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.registers.entry(addr).or_insert(None);
                }
            }
        }

        // In unconstrained mode, remember the pre-access value so it can be rolled back.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }

        // Materialize the record, lazily initializing from uninitialized memory (or 0).
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.registers.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .registers
                    .entry(addr)
                    .or_insert_with(|| *value != 0);
                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };

        record.shard = shard;
        record.timestamp = timestamp;
        record.value
    }
652
    /// Read a register, stamping its record and returning a full [`MemoryReadRecord`]
    /// (new and previous shard/timestamp) for trace generation.
    ///
    /// When `local_memory_access` is `Some` (precompile context), local memory events are
    /// accumulated there instead of in `self.local_memory_access`.
    pub fn rr_traced(
        &mut self,
        register: Register,
        shard: u32,
        timestamp: u32,
        local_memory_access: Option<&mut HashMap<u32, MemoryLocalEvent>>,
    ) -> MemoryReadRecord {
        let addr = register as u32;
        // In checkpoint/unconstrained mode, remember the first observed value of the register.
        let entry = self.state.memory.registers.entry(addr);
        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.registers.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.registers.entry(addr).or_insert(None);
                }
            }
        }
        // In unconstrained mode, remember the pre-access value so it can be rolled back.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }
        // Materialize the record, lazily initializing from uninitialized memory (or 0).
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.registers.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .registers
                    .entry(addr)
                    .or_insert_with(|| *value != 0);
                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };
        // Stamp the record, keeping the previous stamp for the returned read record.
        let prev_record = *record;
        record.shard = shard;
        record.timestamp = timestamp;
        // In trace mode, update (or create) the local memory event for this register.
        if !self.unconstrained && self.executor_mode == ExecutorMode::Trace {
            let local_memory_access = if let Some(local_memory_access) = local_memory_access {
                local_memory_access
            } else {
                &mut self.local_memory_access
            };
            local_memory_access
                .entry(addr)
                .and_modify(|e| {
                    e.final_mem_access = *record;
                })
                .or_insert(MemoryLocalEvent {
                    addr,
                    initial_mem_access: prev_record,
                    final_mem_access: *record,
                });
        }
        MemoryReadRecord::new(
            record.value,
            record.shard,
            record.timestamp,
            prev_record.shard,
            prev_record.timestamp,
        )
    }
    /// Write a word to memory, stamping its record with `value`/`shard`/`timestamp` and
    /// returning a [`MemoryWriteRecord`] with both the new and previous state.
    ///
    /// When `local_memory_access` is `Some` (precompile context), local memory events are
    /// accumulated there instead of in `self.local_memory_access`.
    ///
    /// # Panics
    ///
    /// Panics if `addr` is misaligned, within the register range, or >= `BABYBEAR_PRIME`.
    pub fn mw(
        &mut self,
        addr: u32,
        value: u32,
        shard: u32,
        timestamp: u32,
        local_memory_access: Option<&mut HashMap<u32, MemoryLocalEvent>>,
    ) -> MemoryWriteRecord {
        // Validate the address: word-aligned, above the register window, below the field prime.
        if addr % 4 != 0 || addr <= Register::X31 as u32 || addr >= BABYBEAR_PRIME {
            panic!("Invalid memory access: addr={addr}");
        }

        // In checkpoint/unconstrained mode, remember the first observed value of this cell.
        let entry = self.state.memory.page_table.entry(addr);
        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.page_table.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.page_table.entry(addr).or_insert(None);
                }
            }
        }
        // In unconstrained mode, remember the pre-access value so it can be rolled back.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }
        // Materialize the record, lazily initializing from uninitialized memory (or 0).
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.page_table.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .page_table
                    .entry(addr)
                    .or_insert_with(|| *value != 0);

                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };

        // First touch in this shard (or any precompile access) counts as a local memory access.
        if !self.unconstrained && (record.shard != shard || local_memory_access.is_some()) {
            self.local_counts.local_mem += 1;
        }

        // Feed the record estimator, separating precompile-touched addresses from CPU ones.
        if !self.unconstrained {
            if let Some(estimator) = &mut self.record_estimator {
                if record.shard != shard {
                    estimator.current_local_mem += 1;
                }
                let current_touched_compressed_addresses = if local_memory_access.is_some() {
                    &mut estimator.current_precompile_touched_compressed_addresses
                } else {
                    &mut estimator.current_touched_compressed_addresses
                };
                current_touched_compressed_addresses.insert(addr >> 2);
            }
        }

        // Apply the write, keeping the previous state for the returned write record.
        let prev_record = *record;
        record.value = value;
        record.shard = shard;
        record.timestamp = timestamp;
        // In trace mode, update (or create) the local memory event for this address.
        if !self.unconstrained && self.executor_mode == ExecutorMode::Trace {
            let local_memory_access = if let Some(local_memory_access) = local_memory_access {
                local_memory_access
            } else {
                &mut self.local_memory_access
            };

            local_memory_access
                .entry(addr)
                .and_modify(|e| {
                    e.final_mem_access = *record;
                })
                .or_insert(MemoryLocalEvent {
                    addr,
                    initial_mem_access: prev_record,
                    final_mem_access: *record,
                });
        }

        MemoryWriteRecord::new(
            record.value,
            record.shard,
            record.timestamp,
            prev_record.value,
            prev_record.shard,
            prev_record.timestamp,
        )
    }
837
    /// Write a register, stamping its record and returning a full [`MemoryWriteRecord`]
    /// (new and previous state) for trace generation.
    ///
    /// When `local_memory_access` is `Some` (precompile context), local memory events are
    /// accumulated there instead of in `self.local_memory_access`.
    pub fn rw_traced(
        &mut self,
        register: Register,
        value: u32,
        shard: u32,
        timestamp: u32,
        local_memory_access: Option<&mut HashMap<u32, MemoryLocalEvent>>,
    ) -> MemoryWriteRecord {
        let addr = register as u32;

        let entry = self.state.memory.registers.entry(addr);
        // NOTE(review): unlike `rr_traced`/`mw`/`rw`, this checkpoint guard omits the
        // `ExecutorMode::Checkpoint` condition. Harmless only if this method is reached
        // exclusively in trace mode (as `rw_cpu` suggests) — confirm.
        if self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.registers.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.registers.entry(addr).or_insert(None);
                }
            }
        }

        // In unconstrained mode, remember the pre-access value so it can be rolled back.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }

        // Materialize the record, lazily initializing from uninitialized memory (or 0).
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.registers.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .registers
                    .entry(addr)
                    .or_insert_with(|| *value != 0);

                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };

        // Apply the write, keeping the previous state for the returned write record.
        let prev_record = *record;
        record.value = value;
        record.shard = shard;
        record.timestamp = timestamp;

        // Update (or create) the local memory event for this register.
        if !self.unconstrained {
            let local_memory_access = if let Some(local_memory_access) = local_memory_access {
                local_memory_access
            } else {
                &mut self.local_memory_access
            };

            local_memory_access
                .entry(addr)
                .and_modify(|e| {
                    e.final_mem_access = *record;
                })
                .or_insert(MemoryLocalEvent {
                    addr,
                    initial_mem_access: prev_record,
                    final_mem_access: *record,
                });
        }

        MemoryWriteRecord::new(
            record.value,
            record.shard,
            record.timestamp,
            prev_record.value,
            prev_record.shard,
            prev_record.timestamp,
        )
    }
924
    /// Write a register, stamping its record with `value`/`shard`/`timestamp`.
    ///
    /// Unlike [`Executor::rw_traced`], this does not produce local memory events.
    #[inline]
    pub fn rw(&mut self, register: Register, value: u32, shard: u32, timestamp: u32) {
        let addr = register as u32;
        // In checkpoint/unconstrained mode, remember the first observed value of the register.
        let entry = self.state.memory.registers.entry(addr);
        if self.executor_mode == ExecutorMode::Checkpoint || self.unconstrained {
            match entry {
                Entry::Occupied(ref entry) => {
                    let record = entry.get();
                    self.memory_checkpoint.registers.entry(addr).or_insert_with(|| Some(*record));
                }
                Entry::Vacant(_) => {
                    self.memory_checkpoint.registers.entry(addr).or_insert(None);
                }
            }
        }

        // In unconstrained mode, remember the pre-access value so it can be rolled back.
        if self.unconstrained {
            let record = match entry {
                Entry::Occupied(ref entry) => Some(entry.get()),
                Entry::Vacant(_) => None,
            };
            self.unconstrained_state.memory_diff.entry(addr).or_insert(record.copied());
        }

        // Materialize the record, lazily initializing from uninitialized memory (or 0).
        let record: &mut MemoryRecord = match entry {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = self.state.uninitialized_memory.registers.get(addr).unwrap_or(&0);
                self.uninitialized_memory_checkpoint
                    .registers
                    .entry(addr)
                    .or_insert_with(|| *value != 0);

                entry.insert(MemoryRecord { value: *value, shard: 0, timestamp: 0 })
            }
        };

        record.value = value;
        record.shard = shard;
        record.timestamp = timestamp;
    }
974
975 #[inline]
977 pub fn mr_cpu(&mut self, addr: u32) -> u32 {
978 let record =
980 self.mr(addr, self.shard(), self.timestamp(&MemoryAccessPosition::Memory), None);
981 if self.executor_mode == ExecutorMode::Trace {
983 self.memory_accesses.memory = Some(record.into());
984 }
985 record.value
986 }
987
    /// Read a register operand for the CPU, storing the access record in the matching
    /// `memory_accesses` slot when tracing, and return its value.
    #[inline]
    pub fn rr_cpu(&mut self, register: Register, position: MemoryAccessPosition) -> u32 {
        if self.executor_mode == ExecutorMode::Trace {
            let record = self.rr_traced(register, self.shard(), self.timestamp(&position), None);
            // Unconstrained accesses are not part of the proven trace.
            if !self.unconstrained {
                match position {
                    MemoryAccessPosition::A => self.memory_accesses.a = Some(record.into()),
                    MemoryAccessPosition::B => self.memory_accesses.b = Some(record.into()),
                    MemoryAccessPosition::C => self.memory_accesses.c = Some(record.into()),
                    MemoryAccessPosition::Memory => {
                        self.memory_accesses.memory = Some(record.into());
                    }
                }
            }
            record.value
        } else {
            // Non-trace modes take the cheaper untraced path.
            self.rr(register, self.shard(), self.timestamp(&position))
        }
    }
1009
1010 pub fn mw_cpu(&mut self, addr: u32, value: u32) {
1017 let record =
1019 self.mw(addr, value, self.shard(), self.timestamp(&MemoryAccessPosition::Memory), None);
1020 if self.executor_mode == ExecutorMode::Trace {
1022 debug_assert!(self.memory_accesses.memory.is_none());
1023 self.memory_accesses.memory = Some(record.into());
1024 }
1025 }
1026
    /// Write the destination register (operand A) for the CPU, storing the access record
    /// when tracing. Writes to `x0` are forced to 0, as required by RISC-V.
    pub fn rw_cpu(&mut self, register: Register, value: u32) {
        // The destination register is always accessed at position A.
        let position = MemoryAccessPosition::A;

        // x0 is hard-wired to zero.
        let value = if register == Register::X0 { 0 } else { value };

        if self.executor_mode == ExecutorMode::Trace {
            let record =
                self.rw_traced(register, value, self.shard(), self.timestamp(&position), None);
            if !self.unconstrained {
                debug_assert!(self.memory_accesses.a.is_none());
                self.memory_accesses.a = Some(record.into());
            }
        } else {
            self.rw(register, value, self.shard(), self.timestamp(&position));
        }
    }
1049
    /// Emit the events for the instruction that just executed, dispatching on its class
    /// (ALU, memory, branch, jump, AUIPC, or ecall).
    ///
    /// # Panics
    ///
    /// `unreachable!` if the instruction matches none of the known classes.
    #[allow(clippy::too_many_arguments)]
    fn emit_events(
        &mut self,
        clk: u32,
        next_pc: u32,
        instruction: &Instruction,
        syscall_code: SyscallCode,
        a: u32,
        b: u32,
        c: u32,
        op_a_0: bool,
        record: MemoryAccessRecord,
        exit_code: u32,
    ) {
        // Every instruction produces a CPU event.
        self.emit_cpu(clk, next_pc, a, b, c, record, exit_code);

        if instruction.is_alu_instruction() {
            self.emit_alu_event(instruction.opcode, a, b, c, op_a_0);
        } else if instruction.is_memory_load_instruction() ||
            instruction.is_memory_store_instruction()
        {
            self.emit_mem_instr_event(instruction.opcode, a, b, c, op_a_0);
        } else if instruction.is_branch_instruction() {
            self.emit_branch_event(instruction.opcode, a, b, c, op_a_0, next_pc);
        } else if instruction.is_jump_instruction() {
            self.emit_jump_event(instruction.opcode, a, b, c, op_a_0, next_pc);
        } else if instruction.is_auipc_instruction() {
            self.emit_auipc_event(instruction.opcode, a, b, c, op_a_0);
        } else if instruction.is_ecall_instruction() {
            self.emit_syscall_event(clk, record.a, op_a_0, syscall_code, b, c, next_pc);
        } else {
            unreachable!()
        }
    }
1085
    /// Emit a [`CpuEvent`] for the instruction that just executed at `self.state.pc`.
    #[allow(clippy::too_many_arguments)]
    #[inline]
    fn emit_cpu(
        &mut self,
        clk: u32,
        next_pc: u32,
        a: u32,
        b: u32,
        c: u32,
        record: MemoryAccessRecord,
        exit_code: u32,
    ) {
        self.record.cpu_events.push(CpuEvent {
            clk,
            pc: self.state.pc,
            next_pc,
            a,
            a_record: record.a,
            b,
            b_record: record.b,
            c,
            c_record: record.c,
            exit_code,
        });
    }
1112
    /// Emit an [`AluEvent`] into the record bucket matching the opcode's chip
    /// (add, sub, bitwise, shifts, comparisons, mul, divrem).
    ///
    /// # Panics
    ///
    /// `unreachable!` if `opcode` is not an ALU opcode.
    fn emit_alu_event(&mut self, opcode: Opcode, a: u32, b: u32, c: u32, op_a_0: bool) {
        let event = AluEvent { pc: self.state.pc, opcode, a, b, c, op_a_0 };
        match opcode {
            Opcode::ADD => {
                self.record.add_events.push(event);
            }
            Opcode::SUB => {
                self.record.sub_events.push(event);
            }
            Opcode::XOR | Opcode::OR | Opcode::AND => {
                self.record.bitwise_events.push(event);
            }
            Opcode::SLL => {
                self.record.shift_left_events.push(event);
            }
            Opcode::SRL | Opcode::SRA => {
                self.record.shift_right_events.push(event);
            }
            Opcode::SLT | Opcode::SLTU => {
                self.record.lt_events.push(event);
            }
            Opcode::MUL | Opcode::MULHU | Opcode::MULHSU | Opcode::MULH => {
                self.record.mul_events.push(event);
            }
            Opcode::DIVU | Opcode::REMU | Opcode::DIV | Opcode::REM => {
                self.record.divrem_events.push(event);
                // Division/remainder also emits its auxiliary dependency events.
                emit_divrem_dependencies(self, event);
            }
            _ => unreachable!(),
        }
    }
1145
1146 #[inline]
1148 fn emit_mem_instr_event(&mut self, opcode: Opcode, a: u32, b: u32, c: u32, op_a_0: bool) {
1149 let event = MemInstrEvent {
1150 shard: self.shard(),
1151 clk: self.state.clk,
1152 pc: self.state.pc,
1153 opcode,
1154 a,
1155 b,
1156 c,
1157 op_a_0,
1158 mem_access: self.memory_accesses.memory.expect("Must have memory access"),
1159 };
1160
1161 self.record.memory_instr_events.push(event);
1162 emit_memory_dependencies(
1163 self,
1164 event,
1165 self.memory_accesses.memory.expect("Must have memory access").current_record(),
1166 );
1167 }
1168
    /// Emit a [`BranchEvent`] and its dependency events.
    #[inline]
    fn emit_branch_event(
        &mut self,
        opcode: Opcode,
        a: u32,
        b: u32,
        c: u32,
        op_a_0: bool,
        next_pc: u32,
    ) {
        let event = BranchEvent { pc: self.state.pc, next_pc, opcode, a, b, c, op_a_0 };
        self.record.branch_events.push(event);
        emit_branch_dependencies(self, event);
    }
1184
    /// Emit a [`JumpEvent`] and its dependency events.
    #[inline]
    fn emit_jump_event(
        &mut self,
        opcode: Opcode,
        a: u32,
        b: u32,
        c: u32,
        op_a_0: bool,
        next_pc: u32,
    ) {
        let event = JumpEvent::new(self.state.pc, next_pc, opcode, a, b, c, op_a_0);
        self.record.jump_events.push(event);
        emit_jump_dependencies(self, event);
    }
1200
    /// Emit an [`AUIPCEvent`] and its dependency event.
    #[inline]
    fn emit_auipc_event(&mut self, opcode: Opcode, a: u32, b: u32, c: u32, op_a_0: bool) {
        let event = AUIPCEvent::new(self.state.pc, opcode, a, b, c, op_a_0);
        self.record.auipc_events.push(event);
        emit_auipc_dependency(self, event);
    }
1208
1209 #[allow(clippy::too_many_arguments)]
1211 #[inline]
1212 pub(crate) fn syscall_event(
1213 &self,
1214 clk: u32,
1215 a_record: Option<MemoryRecordEnum>,
1216 op_a_0: Option<bool>,
1217 syscall_code: SyscallCode,
1218 arg1: u32,
1219 arg2: u32,
1220 next_pc: u32,
1221 ) -> SyscallEvent {
1222 let (write, is_real) = match a_record {
1223 Some(MemoryRecordEnum::Write(record)) => (record, true),
1224 _ => (MemoryWriteRecord::default(), false),
1225 };
1226
1227 let op_a_0 = op_a_0.unwrap_or(false);
1233
1234 SyscallEvent {
1235 shard: self.shard(),
1236 clk,
1237 pc: self.state.pc,
1238 next_pc,
1239 a_record: write,
1240 a_record_is_real: is_real,
1241 op_a_0,
1242 syscall_code,
1243 syscall_id: syscall_code.syscall_id(),
1244 arg1,
1245 arg2,
1246 }
1247 }
1248
    /// Build and record a [`SyscallEvent`] for the current cycle.
    #[allow(clippy::too_many_arguments)]
    fn emit_syscall_event(
        &mut self,
        clk: u32,
        a_record: Option<MemoryRecordEnum>,
        op_a_0: bool,
        syscall_code: SyscallCode,
        arg1: u32,
        arg2: u32,
        next_pc: u32,
    ) {
        let syscall_event =
            self.syscall_event(clk, a_record, Some(op_a_0), syscall_code, arg1, arg2, next_pc);

        self.record.syscall_events.push(syscall_event);
    }
1266
    /// Fetch the destination register and source operands `(rd, b, c)` for an ALU
    /// instruction, handling the R/I/immediate-only encodings.
    ///
    /// Note: operand C is read before operand B, matching the C/B access-position ordering.
    fn alu_rr(&mut self, instruction: &Instruction) -> (Register, u32, u32) {
        if !instruction.imm_c {
            // R-type: both operands come from registers.
            let (rd, rs1, rs2) = instruction.r_type();
            let c = self.rr_cpu(rs2, MemoryAccessPosition::C);
            let b = self.rr_cpu(rs1, MemoryAccessPosition::B);
            (rd, b, c)
        } else if !instruction.imm_b && instruction.imm_c {
            // I-type: register B, immediate C.
            let (rd, rs1, imm) = instruction.i_type();
            let (rd, b, c) = (rd, self.rr_cpu(rs1, MemoryAccessPosition::B), imm);
            (rd, b, c)
        } else {
            // Both operands are immediates.
            debug_assert!(instruction.imm_b && instruction.imm_c);
            let (rd, b, c) =
                (Register::from_u8(instruction.op_a), instruction.op_b, instruction.op_c);
            (rd, b, c)
        }
    }
1285
    /// Write the result of an ALU instruction to its destination register.
    #[inline]
    fn alu_rw(&mut self, rd: Register, a: u32) {
        self.rw_cpu(rd, a);
    }
1291
    /// Fetch the operands of a load: `(rd, base, offset, effective address, aligned word)`.
    fn load_rr(&mut self, instruction: &Instruction) -> (Register, u32, u32, u32, u32) {
        let (rd, rs1, imm) = instruction.i_type();
        let (b, c) = (self.rr_cpu(rs1, MemoryAccessPosition::B), imm);
        let addr = b.wrapping_add(c);
        // The load reads the word-aligned cell; sub-word extraction happens in the caller.
        let memory_value = self.mr_cpu(align(addr));
        (rd, b, c, addr, memory_value)
    }
1300
    /// Fetch the operands of a store: `(value, base-ish b, offset, effective address,
    /// current aligned word)`.
    ///
    /// The current word is peeked with `word` (no access event); the actual memory write is
    /// performed by the caller.
    fn store_rr(&mut self, instruction: &Instruction) -> (u32, u32, u32, u32, u32) {
        let (rs1, rs2, imm) = instruction.s_type();
        let c = imm;
        let b = self.rr_cpu(rs2, MemoryAccessPosition::B);
        let a = self.rr_cpu(rs1, MemoryAccessPosition::A);
        let addr = b.wrapping_add(c);
        let memory_value = self.word(align(addr));
        (a, b, c, addr, memory_value)
    }
1311
    /// Fetch the operands `(a, b, c)` of a branch instruction (two registers and the
    /// immediate offset).
    fn branch_rr(&mut self, instruction: &Instruction) -> (u32, u32, u32) {
        let (rs1, rs2, imm) = instruction.b_type();
        let c = imm;
        let b = self.rr_cpu(rs2, MemoryAccessPosition::B);
        let a = self.rr_cpu(rs1, MemoryAccessPosition::A);
        (a, b, c)
    }
1320
    /// Fetch the instruction at the current program counter.
    #[inline]
    fn fetch(&self) -> Instruction {
        *self.program.fetch(self.state.pc)
    }
1326
    /// Execute a single decoded instruction.
    ///
    /// Dispatches on the instruction class (ALU, load, store, branch, jump,
    /// AUIPC, ECALL, EBREAK, UNIMP), applies its architectural effects,
    /// optionally emits trace events, then advances `pc` and `clk`.
    ///
    /// # Errors
    /// Propagates misaligned-access errors from loads/stores and syscall
    /// errors from ECALL; returns dedicated errors for EBREAK and UNIMP.
    #[allow(clippy::too_many_lines)]
    fn execute_instruction(&mut self, instruction: &Instruction) -> Result<(), ExecutionError> {
        // Defaults; ECALL may override clk/next_pc/exit_code below.
        let mut clk = self.state.clk;
        let mut exit_code = 0u32;
        let mut next_pc = self.state.pc.wrapping_add(4);
        let (mut a, b, c): (u32, u32, u32);

        // In trace mode, collect the memory accesses of this instruction.
        if self.executor_mode == ExecutorMode::Trace {
            self.memory_accesses = MemoryAccessRecord::default();
        }

        let mut syscall = SyscallCode::default();

        // Maintain opcode tallies used for shard shape/size estimation.
        // Some instruction classes expand into extra internal ALU events.
        if !self.unconstrained {
            if self.print_report {
                self.report.opcode_counts[instruction.opcode] += 1;
            }
            self.local_counts.event_counts[instruction.opcode] += 1;
            if instruction.is_memory_load_instruction() {
                self.local_counts.event_counts[Opcode::ADD] += 2;
            } else if instruction.is_jump_instruction() {
                self.local_counts.event_counts[Opcode::ADD] += 1;
            } else if instruction.is_branch_instruction() {
                self.local_counts.event_counts[Opcode::ADD] += 1;
                self.local_counts.event_counts[Opcode::SLTU] += 2;
            } else if instruction.is_divrem_instruction() {
                self.local_counts.event_counts[Opcode::MUL] += 2;
                self.local_counts.event_counts[Opcode::ADD] += 2;
                self.local_counts.event_counts[Opcode::SLTU] += 1;
            }
        }

        // Dispatch on the instruction class.
        if instruction.is_alu_instruction() {
            (a, b, c) = self.execute_alu(instruction);
        } else if instruction.is_memory_load_instruction() {
            (a, b, c) = self.execute_load(instruction)?;
        } else if instruction.is_memory_store_instruction() {
            (a, b, c) = self.execute_store(instruction)?;
        } else if instruction.is_branch_instruction() {
            (a, b, c, next_pc) = self.execute_branch(instruction, next_pc);
        } else if instruction.is_jump_instruction() {
            (a, b, c, next_pc) = self.execute_jump(instruction);
        } else if instruction.is_auipc_instruction() {
            let (rd, imm) = instruction.u_type();
            (b, c) = (imm, imm);
            a = self.state.pc.wrapping_add(b);
            self.rw_cpu(rd, a);
        } else if instruction.is_ecall_instruction() {
            (a, b, c, clk, next_pc, syscall, exit_code) = self.execute_ecall()?;
        } else if instruction.is_ebreak_instruction() {
            return Err(ExecutionError::Breakpoint());
        } else if instruction.is_unimp_instruction() {
            return Err(ExecutionError::Unimplemented());
        } else {
            eprintln!("unreachable: {:?}", instruction.opcode);
            unreachable!()
        }

        // Writes to x0 are architecturally discarded.
        let op_a_0 = instruction.op_a == Register::X0 as u8;
        if op_a_0 {
            a = 0;
        }

        if self.executor_mode == ExecutorMode::Trace {
            self.emit_events(
                clk,
                next_pc,
                instruction,
                syscall,
                a,
                b,
                c,
                op_a_0,
                self.memory_accesses,
                exit_code,
            );
        }

        self.state.pc = next_pc;
        // Each instruction advances the clock by one cycle of 4 ticks.
        self.state.clk += 4;

        Ok(())
    }
1422
1423 fn execute_alu(&mut self, instruction: &Instruction) -> (u32, u32, u32) {
1425 let (rd, b, c) = self.alu_rr(instruction);
1426 let a = match instruction.opcode {
1427 Opcode::ADD => b.wrapping_add(c),
1428 Opcode::SUB => b.wrapping_sub(c),
1429 Opcode::XOR => b ^ c,
1430 Opcode::OR => b | c,
1431 Opcode::AND => b & c,
1432 Opcode::SLL => b.wrapping_shl(c),
1433 Opcode::SRL => b.wrapping_shr(c),
1434 Opcode::SRA => (b as i32).wrapping_shr(c) as u32,
1435 Opcode::SLT => {
1436 if (b as i32) < (c as i32) {
1437 1
1438 } else {
1439 0
1440 }
1441 }
1442 Opcode::SLTU => {
1443 if b < c {
1444 1
1445 } else {
1446 0
1447 }
1448 }
1449 Opcode::MUL => b.wrapping_mul(c),
1450 Opcode::MULH => (((b as i32) as i64).wrapping_mul((c as i32) as i64) >> 32) as u32,
1451 Opcode::MULHU => ((b as u64).wrapping_mul(c as u64) >> 32) as u32,
1452 Opcode::MULHSU => (((b as i32) as i64).wrapping_mul(c as i64) >> 32) as u32,
1453 Opcode::DIV => {
1454 if c == 0 {
1455 u32::MAX
1456 } else {
1457 (b as i32).wrapping_div(c as i32) as u32
1458 }
1459 }
1460 Opcode::DIVU => {
1461 if c == 0 {
1462 u32::MAX
1463 } else {
1464 b.wrapping_div(c)
1465 }
1466 }
1467 Opcode::REM => {
1468 if c == 0 {
1469 b
1470 } else {
1471 (b as i32).wrapping_rem(c as i32) as u32
1472 }
1473 }
1474 Opcode::REMU => {
1475 if c == 0 {
1476 b
1477 } else {
1478 b.wrapping_rem(c)
1479 }
1480 }
1481 _ => unreachable!(),
1482 };
1483 self.alu_rw(rd, a);
1484 (a, b, c)
1485 }
1486
1487 fn execute_load(
1489 &mut self,
1490 instruction: &Instruction,
1491 ) -> Result<(u32, u32, u32), ExecutionError> {
1492 let (rd, b, c, addr, memory_read_value) = self.load_rr(instruction);
1493
1494 let a = match instruction.opcode {
1495 Opcode::LB => ((memory_read_value >> ((addr % 4) * 8)) & 0xFF) as i8 as i32 as u32,
1496 Opcode::LH => {
1497 if addr % 2 != 0 {
1498 return Err(ExecutionError::InvalidMemoryAccess(Opcode::LH, addr));
1499 }
1500 ((memory_read_value >> (((addr / 2) % 2) * 16)) & 0xFFFF) as i16 as i32 as u32
1501 }
1502 Opcode::LW => {
1503 if addr % 4 != 0 {
1504 return Err(ExecutionError::InvalidMemoryAccess(Opcode::LW, addr));
1505 }
1506 memory_read_value
1507 }
1508 Opcode::LBU => (memory_read_value >> ((addr % 4) * 8)) & 0xFF,
1509 Opcode::LHU => {
1510 if addr % 2 != 0 {
1511 return Err(ExecutionError::InvalidMemoryAccess(Opcode::LHU, addr));
1512 }
1513 (memory_read_value >> (((addr / 2) % 2) * 16)) & 0xFFFF
1514 }
1515 _ => unreachable!(),
1516 };
1517 self.rw_cpu(rd, a);
1518 Ok((a, b, c))
1519 }
1520
1521 fn execute_store(
1523 &mut self,
1524 instruction: &Instruction,
1525 ) -> Result<(u32, u32, u32), ExecutionError> {
1526 let (a, b, c, addr, memory_read_value) = self.store_rr(instruction);
1527
1528 let memory_store_value = match instruction.opcode {
1529 Opcode::SB => {
1530 let shift = (addr % 4) * 8;
1531 ((a & 0xFF) << shift) | (memory_read_value & !(0xFF << shift))
1532 }
1533 Opcode::SH => {
1534 if addr % 2 != 0 {
1535 return Err(ExecutionError::InvalidMemoryAccess(Opcode::SH, addr));
1536 }
1537 let shift = ((addr / 2) % 2) * 16;
1538 ((a & 0xFFFF) << shift) | (memory_read_value & !(0xFFFF << shift))
1539 }
1540 Opcode::SW => {
1541 if addr % 4 != 0 {
1542 return Err(ExecutionError::InvalidMemoryAccess(Opcode::SW, addr));
1543 }
1544 a
1545 }
1546 _ => unreachable!(),
1547 };
1548 self.mw_cpu(align(addr), memory_store_value);
1549 Ok((a, b, c))
1550 }
1551
1552 fn execute_branch(
1554 &mut self,
1555 instruction: &Instruction,
1556 mut next_pc: u32,
1557 ) -> (u32, u32, u32, u32) {
1558 let (a, b, c) = self.branch_rr(instruction);
1559 let branch = match instruction.opcode {
1560 Opcode::BEQ => a == b,
1561 Opcode::BNE => a != b,
1562 Opcode::BLT => (a as i32) < (b as i32),
1563 Opcode::BGE => (a as i32) >= (b as i32),
1564 Opcode::BLTU => a < b,
1565 Opcode::BGEU => a >= b,
1566 _ => {
1567 unreachable!()
1568 }
1569 };
1570 if branch {
1571 next_pc = self.state.pc.wrapping_add(c);
1572 }
1573 (a, b, c, next_pc)
1574 }
1575
    /// Execute an ECALL: resolve the syscall id from `t0` (x5), run the
    /// registered handler, and return
    /// `(a, b, c, clk, next_pc, syscall, exit_code)`.
    ///
    /// # Errors
    /// Fails for disallowed syscalls inside unconstrained blocks, unknown
    /// syscall ids, and HALT with a non-zero exit code.
    #[allow(clippy::type_complexity)]
    fn execute_ecall(
        &mut self,
    ) -> Result<(u32, u32, u32, u32, u32, SyscallCode, u32), ExecutionError> {
        // Syscall id lives in t0 (x5); arguments in a0/a1 (x10/x11).
        let t0 = Register::X5;
        let syscall_id = self.register(t0);
        let c = self.rr_cpu(Register::X11, MemoryAccessPosition::C);
        let b = self.rr_cpu(Register::X10, MemoryAccessPosition::B);
        let syscall = SyscallCode::from_u32(syscall_id);

        if self.print_report && !self.unconstrained {
            self.report.syscall_counts[syscall] += 1;
        }

        // Only EXIT_UNCONSTRAINED and WRITE are permitted while executing
        // inside an unconstrained block.
        if self.unconstrained &&
            (syscall != SyscallCode::EXIT_UNCONSTRAINED && syscall != SyscallCode::WRITE)
        {
            return Err(ExecutionError::InvalidSyscallUsage(syscall_id as u64));
        }

        // Syscalls sharing a table entry are tallied under a single key.
        let syscall_for_count = syscall.count_map();
        let syscall_count = self.state.syscall_counts.entry(syscall_for_count).or_insert(0);
        *syscall_count += 1;

        let syscall_impl = self.get_syscall(syscall).cloned();
        let mut precompile_rt = SyscallContext::new(self);
        let (a, precompile_next_pc, precompile_cycles, returned_exit_code) =
            if let Some(syscall_impl) = syscall_impl {
                // A handler may return a value for `a`; otherwise `a` keeps
                // the syscall id.
                let res = syscall_impl.execute(&mut precompile_rt, syscall, b, c);
                let a = if let Some(val) = res { val } else { syscall_id };

                if syscall == SyscallCode::HALT && precompile_rt.exit_code != 0 {
                    return Err(ExecutionError::HaltWithNonZeroExitCode(precompile_rt.exit_code));
                }

                (a, precompile_rt.next_pc, syscall_impl.num_extra_cycles(), precompile_rt.exit_code)
            } else {
                return Err(ExecutionError::UnsupportedSyscall(syscall_id));
            };

        // Account this precompile call (and its touched local memory) toward
        // the current estimator shard, opening a new shard at the threshold.
        if let (Some(estimator), Some(syscall_id)) =
            (&mut self.record_estimator, syscall.as_air_id())
        {
            let threshold = match syscall_id {
                RiscvAirId::ShaExtend => self.opts.split_opts.sha_extend,
                RiscvAirId::ShaCompress => self.opts.split_opts.sha_compress,
                RiscvAirId::KeccakPermute => self.opts.split_opts.keccak,
                _ => self.opts.split_opts.deferred,
            } as u64;
            let shards = &mut estimator.precompile_records[syscall_id];
            let local_memory_ct =
                estimator.current_precompile_touched_compressed_addresses.len() as u64;
            match shards.last_mut().filter(|shard| shard.0 < threshold) {
                Some((shard_precompile_event_ct, shard_local_memory_ct)) => {
                    *shard_precompile_event_ct += 1;
                    *shard_local_memory_ct += local_memory_ct;
                }
                None => shards.push((1, local_memory_ct)),
            }
            estimator.current_precompile_touched_compressed_addresses.clear();
        }

        // For EXIT_UNCONSTRAINED, re-read b/c from the registers after the
        // handler runs (the handler may have altered the register state).
        let (b, c) = if syscall == SyscallCode::EXIT_UNCONSTRAINED {
            (self.register(Register::X10), self.register(Register::X11))
        } else {
            (b, c)
        };

        // Write the result to t0 and charge the handler's extra cycles.
        self.rw_cpu(t0, a);
        let clk = self.state.clk;
        self.state.clk += precompile_cycles;

        Ok((a, b, c, clk, precompile_next_pc, syscall, returned_exit_code))
    }
1668
1669 fn execute_jump(&mut self, instruction: &Instruction) -> (u32, u32, u32, u32) {
1671 let (a, b, c, next_pc) = match instruction.opcode {
1672 Opcode::JAL => {
1673 let (rd, imm) = instruction.j_type();
1674 let (b, c) = (imm, 0);
1675 let a = self.state.pc + 4;
1676 self.rw_cpu(rd, a);
1677 let next_pc = self.state.pc.wrapping_add(imm);
1678 (a, b, c, next_pc)
1679 }
1680 Opcode::JALR => {
1681 let (rd, rs1, imm) = instruction.i_type();
1682 let (b, c) = (self.rr_cpu(rs1, MemoryAccessPosition::B), imm);
1683 let a = self.state.pc + 4;
1684 self.rw_cpu(rd, a);
1685 let next_pc = b.wrapping_add(c);
1686 (a, b, c, next_pc)
1687 }
1688 _ => unreachable!(),
1689 };
1690 (a, b, c, next_pc)
1691 }
1692
1693 #[inline]
1695 #[allow(clippy::too_many_lines)]
1696 fn execute_cycle(&mut self) -> Result<bool, ExecutionError> {
1697 let instruction = self.fetch();
1699
1700 self.log(&instruction);
1702
1703 self.execute_instruction(&instruction)?;
1705
1706 self.state.global_clk += 1;
1708
1709 if self.unconstrained {
1710 self.unconstrained_state.total_unconstrained_cycles += 1;
1711 }
1712
1713 if !self.unconstrained {
1714 let cpu_exit = self.max_syscall_cycles + self.state.clk >= self.shard_size;
1716
1717 let mut shape_match_found = true;
1721 if self.state.global_clk % self.shape_check_frequency == 0 {
1722 Self::estimate_riscv_event_counts(
1724 &mut self.event_counts,
1725 (self.state.clk >> 2) as u64,
1726 &self.local_counts,
1727 );
1728
1729 if self.lde_size_check {
1731 let padded_event_counts =
1732 pad_rv32im_event_counts(self.event_counts, self.shape_check_frequency);
1733 let padded_lde_size = estimate_riscv_lde_size(padded_event_counts, &self.costs);
1734 if padded_lde_size > self.lde_size_threshold {
1735 #[allow(clippy::cast_precision_loss)]
1736 let size_gib = (padded_lde_size as f64) / (1 << 9) as f64;
1737 tracing::warn!(
1738 "Stopping shard early since the estimated LDE size is too large: {:.3} GiB",
1739 size_gib
1740 );
1741 shape_match_found = false;
1742 }
1743 }
1744 else if let Some(maximal_shapes) = &self.maximal_shapes {
1746 let distance = |threshold: usize, count: usize| {
1747 if count != 0 {
1748 threshold - count
1749 } else {
1750 usize::MAX
1751 }
1752 };
1753
1754 shape_match_found = false;
1755
1756 for shape in maximal_shapes.iter() {
1757 let cpu_threshold = shape[CoreAirId::Cpu];
1758 if self.state.clk > ((1 << cpu_threshold) << 2) {
1759 continue;
1760 }
1761
1762 let mut l_infinity = usize::MAX;
1763 let mut shape_too_small = false;
1764 for air in CoreAirId::iter() {
1765 if air == CoreAirId::Cpu {
1766 continue;
1767 }
1768
1769 let threshold = 1 << shape[air];
1770 let count = self.event_counts[RiscvAirId::from(air)] as usize;
1771 if count > threshold {
1772 shape_too_small = true;
1773 break;
1774 }
1775
1776 if distance(threshold, count) < l_infinity {
1777 l_infinity = distance(threshold, count);
1778 }
1779 }
1780
1781 if shape_too_small {
1782 continue;
1783 }
1784
1785 if l_infinity >= 32 * (self.shape_check_frequency as usize) {
1786 shape_match_found = true;
1787 break;
1788 }
1789 }
1790
1791 if !shape_match_found {
1792 self.record.counts = Some(self.event_counts);
1793 tracing::debug!(
1794 "Stopping shard {} to stay within some maximal shape. clk = {} pc = 0x{:x?}",
1795 self.shard(),
1796 self.state.global_clk,
1797 self.state.pc,
1798 );
1799 }
1800 }
1801 }
1802
1803 if cpu_exit || !shape_match_found {
1804 self.bump_record();
1805 self.state.current_shard += 1;
1806 self.state.clk = 0;
1807 }
1808
1809 if let Some(max_cycles) = self.max_cycles {
1811 if self.state.global_clk > max_cycles {
1812 return Err(ExecutionError::ExceededCycleLimit(max_cycles));
1813 }
1814 }
1815 }
1816
1817 let done = self.state.pc == 0 ||
1818 self.state.pc.wrapping_sub(self.program.pc_base) >=
1819 (self.program.instructions.len() * 4) as u32;
1820 if done && self.unconstrained {
1821 tracing::error!("program ended in unconstrained mode at clk {}", self.state.global_clk);
1822 return Err(ExecutionError::EndInUnconstrained());
1823 }
1824 Ok(done)
1825 }
1826
    /// Close out the current shard: snapshot estimator event counts, reset
    /// the local tallies, flush local memory events (trace mode), and push
    /// the finished record onto `self.records`, carrying the running public
    /// values into the fresh record.
    pub fn bump_record(&mut self) {
        // When estimating, record this shard's event counts before reset.
        if let Some(estimator) = &mut self.record_estimator {
            self.local_counts.local_mem = std::mem::take(&mut estimator.current_local_mem);
            Self::estimate_riscv_event_counts(
                &mut self.event_counts,
                (self.state.clk >> 2) as u64,
                &self.local_counts,
            );
            estimator.core_records.push(self.event_counts);
            estimator.current_touched_compressed_addresses.clear();
        }
        self.local_counts = LocalCounts::default();
        // In trace mode, move this shard's local memory events to the record.
        if self.executor_mode == ExecutorMode::Trace {
            for (_, event) in self.local_memory_access.drain() {
                self.record.cpu_local_memory_access.push(event);
            }
        }

        // Swap in a fresh record, preserving the running public values.
        let removed_record = std::mem::replace(
            &mut self.record,
            Box::new(ExecutionRecord::new(self.program.clone())),
        );
        let public_values = removed_record.public_values;
        self.record.public_values = public_values;
        self.records.push(removed_record);
    }
1856
    /// Execute in trace mode until the program halts or the shard batch is
    /// full, returning the generated records and whether execution finished.
    ///
    /// # Errors
    /// Propagates any [`ExecutionError`] raised during execution.
    pub fn execute_record(
        &mut self,
        emit_global_memory_events: bool,
    ) -> Result<(Vec<Box<ExecutionRecord>>, bool), ExecutionError> {
        self.executor_mode = ExecutorMode::Trace;
        self.emit_global_memory_events = emit_global_memory_events;
        self.print_report = true;
        let done = self.execute()?;
        Ok((std::mem::take(&mut self.records), done))
    }
1873
    /// Execute a batch of shards in checkpoint mode, returning a state
    /// checkpoint from which execution can later be resumed, together with
    /// the public values and whether the program finished.
    ///
    /// # Errors
    /// Propagates any [`ExecutionError`] raised during execution.
    pub fn execute_state(
        &mut self,
        emit_global_memory_events: bool,
    ) -> Result<(ExecutionState, PublicValues<u32, u32>, bool), ExecutionError> {
        self.memory_checkpoint.clear();
        self.executor_mode = ExecutorMode::Checkpoint;
        self.emit_global_memory_events = emit_global_memory_events;

        // Temporarily remove the large maps so the state clone stays cheap;
        // they are restored immediately after.
        let memory = std::mem::take(&mut self.state.memory);
        let uninitialized_memory = std::mem::take(&mut self.state.uninitialized_memory);
        let proof_stream = std::mem::take(&mut self.state.proof_stream);
        let mut checkpoint = tracing::debug_span!("clone").in_scope(|| self.state.clone());
        self.state.memory = memory;
        self.state.uninitialized_memory = uninitialized_memory;
        self.state.proof_stream = proof_stream;

        let done = tracing::debug_span!("execute").in_scope(|| self.execute())?;
        let next_pc = self.state.pc;
        tracing::debug_span!("create memory checkpoint").in_scope(|| {
            let replacement_memory_checkpoint = Memory::<_>::new_preallocated();
            let replacement_uninitialized_memory_checkpoint = Memory::<_>::new_preallocated();
            let memory_checkpoint =
                std::mem::replace(&mut self.memory_checkpoint, replacement_memory_checkpoint);
            let uninitialized_memory_checkpoint = std::mem::replace(
                &mut self.uninitialized_memory_checkpoint,
                replacement_uninitialized_memory_checkpoint,
            );
            if done && !self.emit_global_memory_events {
                // NOTE(review): on the final batch the checkpoint gets the
                // full final memory with checkpointed values overlaid —
                // presumably so global memory events can be reconstructed
                // downstream; confirm with the caller.
                checkpoint.memory.clone_from(&self.state.memory);
                memory_checkpoint.into_iter().for_each(|(addr, record)| {
                    if let Some(record) = record {
                        checkpoint.memory.insert(addr, record);
                    } else {
                        checkpoint.memory.remove(addr);
                    }
                });
                checkpoint.uninitialized_memory = self.state.uninitialized_memory.clone();
                // Drop entries that did not exist before this batch ran.
                for (addr, is_old) in uninitialized_memory_checkpoint {
                    if !is_old {
                        checkpoint.uninitialized_memory.remove(addr);
                    }
                }
            } else {
                // Keep only the values as they stood when this batch began.
                checkpoint.memory = memory_checkpoint
                    .into_iter()
                    .filter_map(|(addr, record)| record.map(|record| (addr, record)))
                    .collect();
                checkpoint.uninitialized_memory = uninitialized_memory_checkpoint
                    .into_iter()
                    .filter(|&(_, has_value)| has_value)
                    .map(|(addr, _)| (addr, *self.state.uninitialized_memory.get(addr).unwrap()))
                    .collect();
            }
        });
        // The reported pc fields reflect where execution paused.
        let mut public_values = self.records.last().as_ref().unwrap().public_values;
        public_values.start_pc = next_pc;
        public_values.next_pc = next_pc;
        if !done {
            self.records.clear();
        }
        Ok((checkpoint, public_values, done))
    }
1949
1950 fn initialize(&mut self) {
1951 self.state.clk = 0;
1952
1953 tracing::debug!("loading memory image");
1954 for (&addr, value) in &self.program.memory_image {
1955 self.state.memory.insert(addr, MemoryRecord { value: *value, shard: 0, timestamp: 0 });
1956 }
1957 self.state.memory.insert(0, MemoryRecord { value: 0, shard: 0, timestamp: 0 });
1959 }
1960
1961 pub fn run_fast(&mut self) -> Result<(), ExecutionError> {
1967 self.executor_mode = ExecutorMode::Simple;
1968 self.print_report = true;
1969 while !self.execute()? {}
1970
1971 #[cfg(feature = "profiling")]
1972 if let Some((profiler, writer)) = self.profiler.take() {
1973 profiler.write(writer).expect("Failed to write profile to output file");
1974 }
1975
1976 Ok(())
1977 }
1978
1979 pub fn run_checkpoint(
1985 &mut self,
1986 emit_global_memory_events: bool,
1987 ) -> Result<(), ExecutionError> {
1988 self.executor_mode = ExecutorMode::Simple;
1989 self.print_report = true;
1990 while !self.execute_state(emit_global_memory_events)?.2 {}
1991 Ok(())
1992 }
1993
1994 pub fn run(&mut self) -> Result<(), ExecutionError> {
2000 self.executor_mode = ExecutorMode::Trace;
2001 self.print_report = true;
2002 while !self.execute()? {}
2003
2004 #[cfg(feature = "profiling")]
2005 if let Some((profiler, writer)) = self.profiler.take() {
2006 profiler.write(writer).expect("Failed to write profile to output file");
2007 }
2008
2009 Ok(())
2010 }
2011
    /// Drive the executor until the program halts or a full batch of
    /// `shard_batch_size` shards has been produced, then fix up the
    /// per-record public values. Returns whether the program finished.
    ///
    /// # Errors
    /// Propagates cycle errors and enforces the optional
    /// `UNCONSTRAINED_CYCLE_LIMIT` environment override.
    pub fn execute(&mut self) -> Result<bool, ExecutionError> {
        let program = self.program.clone();

        let start_shard = self.state.current_shard;

        // First call ever: load the program image into memory.
        if self.state.global_clk == 0 {
            self.initialize();
        }

        // Optional env override bounding cycles spent in unconstrained mode.
        let unconstrained_cycle_limit =
            std::env::var("UNCONSTRAINED_CYCLE_LIMIT").map(|v| v.parse::<u64>().unwrap()).ok();

        let mut done = false;
        let mut current_shard = self.state.current_shard;
        let mut num_shards_executed = 0;
        loop {
            if self.execute_cycle()? {
                done = true;
                break;
            }

            if let Some(unconstrained_cycle_limit) = unconstrained_cycle_limit {
                if self.unconstrained_state.total_unconstrained_cycles > unconstrained_cycle_limit {
                    return Err(ExecutionError::UnconstrainedCycleLimitExceeded(
                        unconstrained_cycle_limit,
                    ));
                }
            }

            // Stop after a full batch of shards (batch size 0 = unlimited).
            if self.shard_batch_size > 0 && current_shard != self.state.current_shard {
                num_shards_executed += 1;
                current_shard = self.state.current_shard;
                if num_shards_executed == self.shard_batch_size {
                    break;
                }
            }
        }

        let public_values = self.record.public_values;

        if done {
            self.postprocess();
            // Push the final record.
            self.bump_record();

            // Flush any user-provided stdout/stderr overrides.
            if let Some(ref mut w) = self.io_options.stdout {
                if let Err(e) = w.flush() {
                    tracing::error!("failed to flush stdout override: {e}");
                }
            }

            if let Some(ref mut w) = self.io_options.stderr {
                if let Err(e) = w.flush() {
                    tracing::error!("failed to flush stderr override: {e}");
                }
            }
        }

        if !self.record.cpu_events.is_empty() {
            self.bump_record();
        }

        // Fix up per-record public values: shard index plus pc/exit_code
        // chaining across records (empty records inherit the previous pc).
        let mut last_next_pc = 0;
        let mut last_exit_code = 0;
        for (i, record) in self.records.iter_mut().enumerate() {
            record.program = program.clone();
            record.public_values = public_values;
            record.public_values.committed_value_digest = public_values.committed_value_digest;
            record.public_values.deferred_proofs_digest = public_values.deferred_proofs_digest;
            record.public_values.execution_shard = start_shard + i as u32;
            if record.cpu_events.is_empty() {
                record.public_values.start_pc = last_next_pc;
                record.public_values.next_pc = last_next_pc;
                record.public_values.exit_code = last_exit_code;
            } else {
                record.public_values.start_pc = record.cpu_events[0].pc;
                record.public_values.next_pc = record.cpu_events.last().unwrap().next_pc;
                record.public_values.exit_code = record.cpu_events.last().unwrap().exit_code;
                last_next_pc = record.public_values.next_pc;
                last_exit_code = record.public_values.exit_code;
            }
        }

        Ok(done)
    }
2110
    /// Finalize execution: surface leftover I/O buffers and unread inputs,
    /// record global memory counts for the estimator, and emit global memory
    /// initialize/finalize events when requested.
    fn postprocess(&mut self) {
        // Surface any unflushed hook I/O (fd 1 = stdout, fd 2 = stderr).
        for (fd, buf) in &self.io_buf {
            if !buf.is_empty() {
                match fd {
                    1 => {
                        eprintln!("stdout: {buf}");
                    }
                    2 => {
                        eprintln!("stderr: {buf}");
                    }
                    _ => {}
                }
            }
        }

        if self.state.proof_stream_ptr != self.state.proof_stream.len() {
            tracing::warn!(
                "Not all proofs were read. Proving will fail during recursion. Did you pass too
            many proofs in or forget to call verify_sp1_proof?"
            );
        }

        if !self.state.input_stream.is_empty() {
            tracing::warn!("Not all input bytes were read.");
        }

        // Record global memory init/finalize counts for shape estimation.
        if let Some(estimator) = &mut self.record_estimator {
            // x0 (always present) plus every touched register in x1..=x31.
            let touched_reg_ct =
                1 + (1..32).filter(|&r| self.state.memory.registers.get(r).is_some()).count();
            let total_mem = touched_reg_ct + self.state.memory.page_table.exact_len();
            // Addresses covered by the program image are not initialized via
            // global memory-initialize events.
            estimator.memory_global_init_events = total_mem
                .checked_sub(self.record.program.memory_image.len())
                .expect("program memory image should be accounted for in memory exact len")
                as u64;
            estimator.memory_global_finalize_events = total_mem as u64;
        }

        if self.emit_global_memory_events &&
            (self.executor_mode == ExecutorMode::Trace ||
                self.executor_mode == ExecutorMode::Checkpoint)
        {
            let memory_finalize_events = &mut self.record.global_memory_finalize_events;
            memory_finalize_events.reserve_exact(self.state.memory.page_table.estimate_len() + 32);

            // Address 0 is always finalized, with a zero record if untouched.
            let addr_0_record = self.state.memory.get(0);

            let addr_0_final_record = match addr_0_record {
                Some(record) => record,
                None => &MemoryRecord { value: 0, shard: 0, timestamp: 0 },
            };
            memory_finalize_events
                .push(MemoryInitializeFinalizeEvent::finalize_from_record(0, addr_0_final_record));

            let memory_initialize_events = &mut self.record.global_memory_initialize_events;
            memory_initialize_events
                .reserve_exact(self.state.memory.page_table.estimate_len() + 32);
            let addr_0_initialize_event = MemoryInitializeFinalizeEvent::initialize(0, 0);
            memory_initialize_events.push(addr_0_initialize_event);

            if self.print_report {
                self.report.touched_memory_addresses = 0;
            }
            // Registers x1..=x31 are addressed as 1..32 here.
            for addr in 1..32 {
                let record = self.state.memory.registers.get(addr);
                if record.is_some() {
                    if self.print_report {
                        self.report.touched_memory_addresses += 1;
                    }
                    // Program-image addresses are initialized by the image,
                    // not by an initialize event.
                    if !self.record.program.memory_image.contains_key(&addr) {
                        let initial_value =
                            self.state.uninitialized_memory.registers.get(addr).unwrap_or(&0);
                        memory_initialize_events
                            .push(MemoryInitializeFinalizeEvent::initialize(addr, *initial_value));
                    }

                    let record = *record.unwrap();
                    memory_finalize_events
                        .push(MemoryInitializeFinalizeEvent::finalize_from_record(addr, &record));
                }
            }
            // All remaining touched memory addresses.
            for addr in self.state.memory.page_table.keys() {
                if self.print_report {
                    self.report.touched_memory_addresses += 1;
                }

                if !self.record.program.memory_image.contains_key(&addr) {
                    let initial_value = self.state.uninitialized_memory.get(addr).unwrap_or(&0);
                    memory_initialize_events
                        .push(MemoryInitializeFinalizeEvent::initialize(addr, *initial_value));
                }

                let record = *self.state.memory.get(addr).unwrap();
                memory_finalize_events
                    .push(MemoryInitializeFinalizeEvent::finalize_from_record(addr, &record));
            }
        }
    }
2226
    /// Look up the handler registered for a syscall code, if any.
    fn get_syscall(&mut self, code: SyscallCode) -> Option<&Arc<dyn Syscall>> {
        self.syscall_map.get(&code)
    }
2230
2231 fn estimate_riscv_event_counts(
2233 event_counts: &mut EnumMap<RiscvAirId, u64>,
2234 cpu_cycles: u64,
2235 local_counts: &LocalCounts,
2236 ) {
2237 let touched_addresses: u64 = local_counts.local_mem as u64;
2238 let syscalls_sent: u64 = local_counts.syscalls_sent as u64;
2239 let opcode_counts: &EnumMap<Opcode, u64> = &local_counts.event_counts;
2240
2241 event_counts[RiscvAirId::Cpu] = cpu_cycles;
2243
2244 event_counts[RiscvAirId::AddSub] = opcode_counts[Opcode::ADD] + opcode_counts[Opcode::SUB];
2246
2247 event_counts[RiscvAirId::Mul] = opcode_counts[Opcode::MUL] +
2249 opcode_counts[Opcode::MULH] +
2250 opcode_counts[Opcode::MULHU] +
2251 opcode_counts[Opcode::MULHSU];
2252
2253 event_counts[RiscvAirId::Bitwise] =
2255 opcode_counts[Opcode::XOR] + opcode_counts[Opcode::OR] + opcode_counts[Opcode::AND];
2256
2257 event_counts[RiscvAirId::ShiftLeft] = opcode_counts[Opcode::SLL];
2259
2260 event_counts[RiscvAirId::ShiftRight] =
2262 opcode_counts[Opcode::SRL] + opcode_counts[Opcode::SRA];
2263
2264 event_counts[RiscvAirId::DivRem] = opcode_counts[Opcode::DIV] +
2266 opcode_counts[Opcode::DIVU] +
2267 opcode_counts[Opcode::REM] +
2268 opcode_counts[Opcode::REMU];
2269
2270 event_counts[RiscvAirId::Lt] = opcode_counts[Opcode::SLT] + opcode_counts[Opcode::SLTU];
2272
2273 event_counts[RiscvAirId::MemoryLocal] =
2275 touched_addresses.div_ceil(NUM_LOCAL_MEMORY_ENTRIES_PER_ROW_EXEC as u64);
2276
2277 event_counts[RiscvAirId::Branch] = opcode_counts[Opcode::BEQ] +
2279 opcode_counts[Opcode::BNE] +
2280 opcode_counts[Opcode::BLT] +
2281 opcode_counts[Opcode::BGE] +
2282 opcode_counts[Opcode::BLTU] +
2283 opcode_counts[Opcode::BGEU];
2284
2285 event_counts[RiscvAirId::Jump] = opcode_counts[Opcode::JAL] + opcode_counts[Opcode::JALR];
2287
2288 event_counts[RiscvAirId::Auipc] = opcode_counts[Opcode::AUIPC] +
2290 opcode_counts[Opcode::UNIMP] +
2291 opcode_counts[Opcode::EBREAK];
2292
2293 event_counts[RiscvAirId::MemoryInstrs] = opcode_counts[Opcode::LB] +
2295 opcode_counts[Opcode::LH] +
2296 opcode_counts[Opcode::LW] +
2297 opcode_counts[Opcode::LBU] +
2298 opcode_counts[Opcode::LHU] +
2299 opcode_counts[Opcode::SB] +
2300 opcode_counts[Opcode::SH] +
2301 opcode_counts[Opcode::SW];
2302
2303 event_counts[RiscvAirId::SyscallInstrs] = opcode_counts[Opcode::ECALL];
2305
2306 event_counts[RiscvAirId::SyscallCore] = syscalls_sent;
2308
2309 event_counts[RiscvAirId::Global] =
2311 2 * touched_addresses + event_counts[RiscvAirId::SyscallInstrs];
2312
2313 event_counts[RiscvAirId::Mul] += event_counts[RiscvAirId::DivRem];
2315 event_counts[RiscvAirId::Lt] += event_counts[RiscvAirId::DivRem];
2316
2317 }
2320
2321 #[inline]
2322 fn log(&mut self, _: &Instruction) {
2323 #[cfg(feature = "profiling")]
2324 if let Some((ref mut profiler, _)) = self.profiler {
2325 if !self.unconstrained {
2326 profiler.record(self.state.global_clk, self.state.pc as u64);
2327 }
2328 }
2329
2330 if !self.unconstrained && self.state.global_clk % 10_000_000 == 0 {
2331 tracing::info!("clk = {} pc = 0x{:x?}", self.state.global_clk, self.state.pc);
2332 }
2333 }
2334}
2335
impl Default for ExecutorMode {
    /// The default mode runs without tracing or checkpointing.
    fn default() -> Self {
        Self::Simple
    }
}
2341
/// Round an address down to the nearest word (4-byte) boundary.
#[must_use]
pub const fn align(addr: u32) -> u32 {
    // Clearing the two low bits is equivalent to `addr - addr % 4`.
    addr & !3
}
2347
2348#[cfg(test)]
2349mod tests {
2350
2351 use sp1_stark::SP1CoreOpts;
2352 use sp1_zkvm::syscalls::SHA_COMPRESS;
2353
2354 use crate::programs::tests::{
2355 fibonacci_program, panic_program, secp256r1_add_program, secp256r1_double_program,
2356 simple_memory_program, simple_program, ssz_withdrawals_program, u256xu2048_mul_program,
2357 };
2358
2359 use crate::{Register, SP1Context};
2360
2361 use super::{Executor, Instruction, Opcode, Program};
2362
    // Helper used only for the compile-time `Send` check below.
    fn _assert_send<T: Send>() {}
2364
    // Compile-time check that `Executor` is `Send`.
    fn _assert_runtime_is_send() {
        #[allow(clippy::used_underscore_items)]
        _assert_send::<Executor>();
    }
2370
2371 #[test]
2372 fn test_simple_program_run() {
2373 let program = simple_program();
2374 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2375 runtime.run().unwrap();
2376 assert_eq!(runtime.register(Register::X31), 42);
2377 }
2378
2379 #[test]
2380 fn test_fibonacci_program_run() {
2381 let program = fibonacci_program();
2382 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2383 runtime.run().unwrap();
2384 }
2385
2386 #[test]
2387 fn test_fibonacci_program_run_with_max_cycles() {
2388 let program = fibonacci_program();
2389 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2390 runtime.run().unwrap();
2391
2392 let max_cycles = runtime.state.global_clk;
2393
2394 let program = fibonacci_program();
2395 let context = SP1Context::builder().max_cycles(max_cycles).build();
2396 let mut runtime = Executor::with_context(program, SP1CoreOpts::default(), context);
2397 runtime.run().unwrap();
2398 }
2399
2400 #[test]
2401 fn test_secp256r1_add_program_run() {
2402 let program = secp256r1_add_program();
2403 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2404 runtime.run().unwrap();
2405 }
2406
2407 #[test]
2408 fn test_secp256r1_double_program_run() {
2409 let program = secp256r1_double_program();
2410 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2411 runtime.run().unwrap();
2412 }
2413
2414 #[test]
2415 fn test_u256xu2048_mul() {
2416 let program = u256xu2048_mul_program();
2417 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2418 runtime.run().unwrap();
2419 }
2420
2421 #[test]
2422 fn test_ssz_withdrawals_program_run() {
2423 let program = ssz_withdrawals_program();
2424 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2425 runtime.run().unwrap();
2426 }
2427
2428 #[test]
2429 #[should_panic]
2430 fn test_panic() {
2431 let program = panic_program();
2432 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2433 runtime.run().unwrap();
2434 }
2435
2436 #[test]
2437 fn test_add() {
2438 let instructions = vec![
2443 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2444 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2445 Instruction::new(Opcode::ADD, 31, 30, 29, false, false),
2446 ];
2447 let program = Program::new(instructions, 0, 0);
2448 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2449 runtime.run().unwrap();
2450 assert_eq!(runtime.register(Register::X31), 42);
2451 }
2452
2453 #[test]
2454 fn test_sub() {
2455 let instructions = vec![
2459 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2460 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2461 Instruction::new(Opcode::SUB, 31, 30, 29, false, false),
2462 ];
2463 let program = Program::new(instructions, 0, 0);
2464
2465 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2466 runtime.run().unwrap();
2467 assert_eq!(runtime.register(Register::X31), 32);
2468 }
2469
2470 #[test]
2471 fn test_xor() {
2472 let instructions = vec![
2476 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2477 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2478 Instruction::new(Opcode::XOR, 31, 30, 29, false, false),
2479 ];
2480 let program = Program::new(instructions, 0, 0);
2481
2482 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2483 runtime.run().unwrap();
2484 assert_eq!(runtime.register(Register::X31), 32);
2485 }
2486
2487 #[test]
2488 fn test_or() {
2489 let instructions = vec![
2493 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2494 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2495 Instruction::new(Opcode::OR, 31, 30, 29, false, false),
2496 ];
2497 let program = Program::new(instructions, 0, 0);
2498
2499 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2500
2501 runtime.run().unwrap();
2502 assert_eq!(runtime.register(Register::X31), 37);
2503 }
2504
2505 #[test]
2506 fn test_and() {
2507 let instructions = vec![
2511 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2512 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2513 Instruction::new(Opcode::AND, 31, 30, 29, false, false),
2514 ];
2515 let program = Program::new(instructions, 0, 0);
2516
2517 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2518 runtime.run().unwrap();
2519 assert_eq!(runtime.register(Register::X31), 5);
2520 }
2521
2522 #[test]
2523 fn test_sll() {
2524 let instructions = vec![
2528 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2529 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2530 Instruction::new(Opcode::SLL, 31, 30, 29, false, false),
2531 ];
2532 let program = Program::new(instructions, 0, 0);
2533
2534 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2535 runtime.run().unwrap();
2536 assert_eq!(runtime.register(Register::X31), 1184);
2537 }
2538
2539 #[test]
2540 fn test_srl() {
2541 let instructions = vec![
2545 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2546 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2547 Instruction::new(Opcode::SRL, 31, 30, 29, false, false),
2548 ];
2549 let program = Program::new(instructions, 0, 0);
2550
2551 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2552 runtime.run().unwrap();
2553 assert_eq!(runtime.register(Register::X31), 1);
2554 }
2555
2556 #[test]
2557 fn test_sra() {
2558 let instructions = vec![
2562 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2563 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2564 Instruction::new(Opcode::SRA, 31, 30, 29, false, false),
2565 ];
2566 let program = Program::new(instructions, 0, 0);
2567
2568 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2569 runtime.run().unwrap();
2570 assert_eq!(runtime.register(Register::X31), 1);
2571 }
2572
2573 #[test]
2574 fn test_slt() {
2575 let instructions = vec![
2579 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2580 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2581 Instruction::new(Opcode::SLT, 31, 30, 29, false, false),
2582 ];
2583 let program = Program::new(instructions, 0, 0);
2584
2585 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2586 runtime.run().unwrap();
2587 assert_eq!(runtime.register(Register::X31), 0);
2588 }
2589
2590 #[test]
2591 fn test_sltu() {
2592 let instructions = vec![
2596 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2597 Instruction::new(Opcode::ADD, 30, 0, 37, false, true),
2598 Instruction::new(Opcode::SLTU, 31, 30, 29, false, false),
2599 ];
2600 let program = Program::new(instructions, 0, 0);
2601
2602 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2603 runtime.run().unwrap();
2604 assert_eq!(runtime.register(Register::X31), 0);
2605 }
2606
2607 #[test]
2608 fn test_addi() {
2609 let instructions = vec![
2613 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2614 Instruction::new(Opcode::ADD, 30, 29, 37, false, true),
2615 Instruction::new(Opcode::ADD, 31, 30, 42, false, true),
2616 ];
2617 let program = Program::new(instructions, 0, 0);
2618
2619 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2620 runtime.run().unwrap();
2621 assert_eq!(runtime.register(Register::X31), 84);
2622 }
2623
2624 #[test]
2625 fn test_addi_negative() {
2626 let instructions = vec![
2630 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2631 Instruction::new(Opcode::ADD, 30, 29, 0xFFFF_FFFF, false, true),
2632 Instruction::new(Opcode::ADD, 31, 30, 4, false, true),
2633 ];
2634 let program = Program::new(instructions, 0, 0);
2635 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2636 runtime.run().unwrap();
2637 assert_eq!(runtime.register(Register::X31), 5 - 1 + 4);
2638 }
2639
2640 #[test]
2641 fn test_xori() {
2642 let instructions = vec![
2646 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2647 Instruction::new(Opcode::XOR, 30, 29, 37, false, true),
2648 Instruction::new(Opcode::XOR, 31, 30, 42, false, true),
2649 ];
2650 let program = Program::new(instructions, 0, 0);
2651 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2652 runtime.run().unwrap();
2653 assert_eq!(runtime.register(Register::X31), 10);
2654 }
2655
2656 #[test]
2657 fn test_ori() {
2658 let instructions = vec![
2662 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2663 Instruction::new(Opcode::OR, 30, 29, 37, false, true),
2664 Instruction::new(Opcode::OR, 31, 30, 42, false, true),
2665 ];
2666 let program = Program::new(instructions, 0, 0);
2667 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2668 runtime.run().unwrap();
2669 assert_eq!(runtime.register(Register::X31), 47);
2670 }
2671
2672 #[test]
2673 fn test_andi() {
2674 let instructions = vec![
2678 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2679 Instruction::new(Opcode::AND, 30, 29, 37, false, true),
2680 Instruction::new(Opcode::AND, 31, 30, 42, false, true),
2681 ];
2682 let program = Program::new(instructions, 0, 0);
2683 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2684 runtime.run().unwrap();
2685 assert_eq!(runtime.register(Register::X31), 0);
2686 }
2687
2688 #[test]
2689 fn test_slli() {
2690 let instructions = vec![
2693 Instruction::new(Opcode::ADD, 29, 0, 5, false, true),
2694 Instruction::new(Opcode::SLL, 31, 29, 4, false, true),
2695 ];
2696 let program = Program::new(instructions, 0, 0);
2697 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2698 runtime.run().unwrap();
2699 assert_eq!(runtime.register(Register::X31), 80);
2700 }
2701
2702 #[test]
2703 fn test_srli() {
2704 let instructions = vec![
2707 Instruction::new(Opcode::ADD, 29, 0, 42, false, true),
2708 Instruction::new(Opcode::SRL, 31, 29, 4, false, true),
2709 ];
2710 let program = Program::new(instructions, 0, 0);
2711 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2712 runtime.run().unwrap();
2713 assert_eq!(runtime.register(Register::X31), 2);
2714 }
2715
2716 #[test]
2717 fn test_srai() {
2718 let instructions = vec![
2721 Instruction::new(Opcode::ADD, 29, 0, 42, false, true),
2722 Instruction::new(Opcode::SRA, 31, 29, 4, false, true),
2723 ];
2724 let program = Program::new(instructions, 0, 0);
2725 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2726 runtime.run().unwrap();
2727 assert_eq!(runtime.register(Register::X31), 2);
2728 }
2729
2730 #[test]
2731 fn test_slti() {
2732 let instructions = vec![
2735 Instruction::new(Opcode::ADD, 29, 0, 42, false, true),
2736 Instruction::new(Opcode::SLT, 31, 29, 37, false, true),
2737 ];
2738 let program = Program::new(instructions, 0, 0);
2739 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2740 runtime.run().unwrap();
2741 assert_eq!(runtime.register(Register::X31), 0);
2742 }
2743
2744 #[test]
2745 fn test_sltiu() {
2746 let instructions = vec![
2749 Instruction::new(Opcode::ADD, 29, 0, 42, false, true),
2750 Instruction::new(Opcode::SLTU, 31, 29, 37, false, true),
2751 ];
2752 let program = Program::new(instructions, 0, 0);
2753 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2754 runtime.run().unwrap();
2755 assert_eq!(runtime.register(Register::X31), 0);
2756 }
2757
2758 #[test]
2759 fn test_jalr() {
2760 let instructions = vec![
2768 Instruction::new(Opcode::ADD, 11, 11, 100, false, true),
2769 Instruction::new(Opcode::JALR, 5, 11, 8, false, true),
2770 ];
2771 let program = Program::new(instructions, 0, 0);
2772 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2773 runtime.run().unwrap();
2774 assert_eq!(runtime.registers()[Register::X5 as usize], 8);
2775 assert_eq!(runtime.registers()[Register::X11 as usize], 100);
2776 assert_eq!(runtime.state.pc, 108);
2777 }
2778
2779 fn simple_op_code_test(opcode: Opcode, expected: u32, a: u32, b: u32) {
2780 let instructions = vec![
2781 Instruction::new(Opcode::ADD, 10, 0, a, false, true),
2782 Instruction::new(Opcode::ADD, 11, 0, b, false, true),
2783 Instruction::new(opcode, 12, 10, 11, false, false),
2784 ];
2785 let program = Program::new(instructions, 0, 0);
2786 let mut runtime = Executor::new(program, SP1CoreOpts::default());
2787 runtime.run().unwrap();
2788 assert_eq!(runtime.registers()[Register::X12 as usize], expected);
2789 }
2790
    #[test]
    #[allow(clippy::unreadable_literal)]
    fn multiplication_tests() {
        // Each case is (opcode, expected, rs1, rs2); cases mirror the
        // riscv-tests suite for the M extension.
        // MULHU: upper 32 bits of the unsigned 64-bit product.
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x00000000, 0x00000000);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x00000001, 0x00000001);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x00000003, 0x00000007);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x00000000, 0xffff8000);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x80000000, 0x00000000);
        simple_op_code_test(Opcode::MULHU, 0x7fffc000, 0x80000000, 0xffff8000);
        simple_op_code_test(Opcode::MULHU, 0x0001fefe, 0xaaaaaaab, 0x0002fe7d);
        simple_op_code_test(Opcode::MULHU, 0x0001fefe, 0x0002fe7d, 0xaaaaaaab);
        simple_op_code_test(Opcode::MULHU, 0xfe010000, 0xff000000, 0xff000000);
        simple_op_code_test(Opcode::MULHU, 0xfffffffe, 0xffffffff, 0xffffffff);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0xffffffff, 0x00000001);
        simple_op_code_test(Opcode::MULHU, 0x00000000, 0x00000001, 0xffffffff);

        // MULHSU: upper 32 bits of signed rs1 times unsigned rs2.
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x00000000, 0x00000000);
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x00000001, 0x00000001);
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x00000003, 0x00000007);
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x00000000, 0xffff8000);
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x80000000, 0x00000000);
        simple_op_code_test(Opcode::MULHSU, 0x80004000, 0x80000000, 0xffff8000);
        simple_op_code_test(Opcode::MULHSU, 0xffff0081, 0xaaaaaaab, 0x0002fe7d);
        simple_op_code_test(Opcode::MULHSU, 0x0001fefe, 0x0002fe7d, 0xaaaaaaab);
        simple_op_code_test(Opcode::MULHSU, 0xff010000, 0xff000000, 0xff000000);
        simple_op_code_test(Opcode::MULHSU, 0xffffffff, 0xffffffff, 0xffffffff);
        simple_op_code_test(Opcode::MULHSU, 0xffffffff, 0xffffffff, 0x00000001);
        simple_op_code_test(Opcode::MULHSU, 0x00000000, 0x00000001, 0xffffffff);

        // MULH: upper 32 bits of the signed 64-bit product.
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x00000000, 0x00000000);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x00000001, 0x00000001);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x00000003, 0x00000007);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x00000000, 0xffff8000);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x80000000, 0x00000000);
        // NOTE(review): duplicated case — the riscv-tests analogue exercises
        // (0x80000000, 0xffff8000) at this position; confirm intent.
        simple_op_code_test(Opcode::MULH, 0x00000000, 0x80000000, 0x00000000);
        simple_op_code_test(Opcode::MULH, 0xffff0081, 0xaaaaaaab, 0x0002fe7d);
        simple_op_code_test(Opcode::MULH, 0xffff0081, 0x0002fe7d, 0xaaaaaaab);
        simple_op_code_test(Opcode::MULH, 0x00010000, 0xff000000, 0xff000000);
        simple_op_code_test(Opcode::MULH, 0x00000000, 0xffffffff, 0xffffffff);
        simple_op_code_test(Opcode::MULH, 0xffffffff, 0xffffffff, 0x00000001);
        simple_op_code_test(Opcode::MULH, 0xffffffff, 0x00000001, 0xffffffff);

        // MUL: lower 32 bits of the product (same for signed and unsigned).
        simple_op_code_test(Opcode::MUL, 0x00001200, 0x00007e00, 0xb6db6db7);
        simple_op_code_test(Opcode::MUL, 0x00001240, 0x00007fc0, 0xb6db6db7);
        simple_op_code_test(Opcode::MUL, 0x00000000, 0x00000000, 0x00000000);
        simple_op_code_test(Opcode::MUL, 0x00000001, 0x00000001, 0x00000001);
        simple_op_code_test(Opcode::MUL, 0x00000015, 0x00000003, 0x00000007);
        simple_op_code_test(Opcode::MUL, 0x00000000, 0x00000000, 0xffff8000);
        simple_op_code_test(Opcode::MUL, 0x00000000, 0x80000000, 0x00000000);
        simple_op_code_test(Opcode::MUL, 0x00000000, 0x80000000, 0xffff8000);
        simple_op_code_test(Opcode::MUL, 0x0000ff7f, 0xaaaaaaab, 0x0002fe7d);
        simple_op_code_test(Opcode::MUL, 0x0000ff7f, 0x0002fe7d, 0xaaaaaaab);
        simple_op_code_test(Opcode::MUL, 0x00000000, 0xff000000, 0xff000000);
        simple_op_code_test(Opcode::MUL, 0x00000001, 0xffffffff, 0xffffffff);
        simple_op_code_test(Opcode::MUL, 0xffffffff, 0xffffffff, 0x00000001);
        simple_op_code_test(Opcode::MUL, 0xffffffff, 0x00000001, 0xffffffff);
    }
2848
2849 fn neg(a: u32) -> u32 {
2850 u32::MAX - a + 1
2851 }
2852
    #[test]
    fn division_tests() {
        // Each case is (opcode, expected, dividend, divisor).
        // DIVU: unsigned division; negative-looking operands are just large
        // unsigned values, so e.g. 20 / (2^32 - 6) truncates to 0.
        simple_op_code_test(Opcode::DIVU, 3, 20, 6);
        simple_op_code_test(Opcode::DIVU, 715_827_879, u32::MAX - 20 + 1, 6);
        simple_op_code_test(Opcode::DIVU, 0, 20, u32::MAX - 6 + 1);
        simple_op_code_test(Opcode::DIVU, 0, u32::MAX - 20 + 1, u32::MAX - 6 + 1);

        simple_op_code_test(Opcode::DIVU, 1 << 31, 1 << 31, 1);
        simple_op_code_test(Opcode::DIVU, 0, 1 << 31, u32::MAX - 1 + 1);

        // Division by zero: DIVU yields all ones (RISC-V spec), no trap.
        simple_op_code_test(Opcode::DIVU, u32::MAX, 1 << 31, 0);
        simple_op_code_test(Opcode::DIVU, u32::MAX, 1, 0);
        simple_op_code_test(Opcode::DIVU, u32::MAX, 0, 0);

        // DIV: signed division, truncating toward zero; x / 0 yields -1.
        simple_op_code_test(Opcode::DIV, 3, 18, 6);
        simple_op_code_test(Opcode::DIV, neg(6), neg(24), 4);
        simple_op_code_test(Opcode::DIV, neg(2), 16, neg(8));
        simple_op_code_test(Opcode::DIV, neg(1), 0, 0);

        // Signed overflow: i32::MIN / -1 yields i32::MIN, remainder 0.
        simple_op_code_test(Opcode::DIV, 1 << 31, 1 << 31, neg(1));
        simple_op_code_test(Opcode::REM, 0, 1 << 31, neg(1));
    }
2876
    #[test]
    fn remainder_tests() {
        // Each case is (opcode, expected, dividend, divisor).
        // REM: signed remainder; sign follows the dividend, and x % 0 = x.
        simple_op_code_test(Opcode::REM, 7, 16, 9);
        simple_op_code_test(Opcode::REM, neg(4), neg(22), 6);
        simple_op_code_test(Opcode::REM, 1, 25, neg(3));
        simple_op_code_test(Opcode::REM, neg(2), neg(22), neg(4));
        simple_op_code_test(Opcode::REM, 0, 873, 1);
        simple_op_code_test(Opcode::REM, 0, 873, neg(1));
        simple_op_code_test(Opcode::REM, 5, 5, 0);
        simple_op_code_test(Opcode::REM, neg(5), neg(5), 0);
        simple_op_code_test(Opcode::REM, 0, 0, 0);

        // REMU: unsigned remainder; neg(x) operands are large unsigned
        // values, and x % 0 = x here too.
        simple_op_code_test(Opcode::REMU, 4, 18, 7);
        simple_op_code_test(Opcode::REMU, 6, neg(20), 11);
        simple_op_code_test(Opcode::REMU, 23, 23, neg(6));
        simple_op_code_test(Opcode::REMU, neg(21), neg(21), neg(11));
        simple_op_code_test(Opcode::REMU, 5, 5, 0);
        simple_op_code_test(Opcode::REMU, neg(1), neg(1), 0);
        simple_op_code_test(Opcode::REMU, 0, 0, 0);
    }
2897
    #[test]
    #[allow(clippy::unreadable_literal)]
    fn shift_tests() {
        // Each case is (opcode, expected, value, shift_amount). The
        // 0xffffffe0.. cases match the 0..31 cases above them, showing that
        // only the low 5 bits of the shift amount are used.
        // SLL: logical left shift.
        simple_op_code_test(Opcode::SLL, 0x00000001, 0x00000001, 0);
        simple_op_code_test(Opcode::SLL, 0x00000002, 0x00000001, 1);
        simple_op_code_test(Opcode::SLL, 0x00000080, 0x00000001, 7);
        simple_op_code_test(Opcode::SLL, 0x00004000, 0x00000001, 14);
        simple_op_code_test(Opcode::SLL, 0x80000000, 0x00000001, 31);
        simple_op_code_test(Opcode::SLL, 0xffffffff, 0xffffffff, 0);
        simple_op_code_test(Opcode::SLL, 0xfffffffe, 0xffffffff, 1);
        simple_op_code_test(Opcode::SLL, 0xffffff80, 0xffffffff, 7);
        simple_op_code_test(Opcode::SLL, 0xffffc000, 0xffffffff, 14);
        simple_op_code_test(Opcode::SLL, 0x80000000, 0xffffffff, 31);
        simple_op_code_test(Opcode::SLL, 0x21212121, 0x21212121, 0);
        simple_op_code_test(Opcode::SLL, 0x42424242, 0x21212121, 1);
        simple_op_code_test(Opcode::SLL, 0x90909080, 0x21212121, 7);
        simple_op_code_test(Opcode::SLL, 0x48484000, 0x21212121, 14);
        simple_op_code_test(Opcode::SLL, 0x80000000, 0x21212121, 31);
        simple_op_code_test(Opcode::SLL, 0x21212121, 0x21212121, 0xffffffe0);
        simple_op_code_test(Opcode::SLL, 0x42424242, 0x21212121, 0xffffffe1);
        simple_op_code_test(Opcode::SLL, 0x90909080, 0x21212121, 0xffffffe7);
        simple_op_code_test(Opcode::SLL, 0x48484000, 0x21212121, 0xffffffee);
        simple_op_code_test(Opcode::SLL, 0x00000000, 0x21212120, 0xffffffff);

        // SRL: logical right shift (zero-filled).
        simple_op_code_test(Opcode::SRL, 0xffff8000, 0xffff8000, 0);
        simple_op_code_test(Opcode::SRL, 0x7fffc000, 0xffff8000, 1);
        simple_op_code_test(Opcode::SRL, 0x01ffff00, 0xffff8000, 7);
        simple_op_code_test(Opcode::SRL, 0x0003fffe, 0xffff8000, 14);
        simple_op_code_test(Opcode::SRL, 0x0001ffff, 0xffff8001, 15);
        simple_op_code_test(Opcode::SRL, 0xffffffff, 0xffffffff, 0);
        simple_op_code_test(Opcode::SRL, 0x7fffffff, 0xffffffff, 1);
        simple_op_code_test(Opcode::SRL, 0x01ffffff, 0xffffffff, 7);
        simple_op_code_test(Opcode::SRL, 0x0003ffff, 0xffffffff, 14);
        simple_op_code_test(Opcode::SRL, 0x00000001, 0xffffffff, 31);
        simple_op_code_test(Opcode::SRL, 0x21212121, 0x21212121, 0);
        simple_op_code_test(Opcode::SRL, 0x10909090, 0x21212121, 1);
        simple_op_code_test(Opcode::SRL, 0x00424242, 0x21212121, 7);
        simple_op_code_test(Opcode::SRL, 0x00008484, 0x21212121, 14);
        simple_op_code_test(Opcode::SRL, 0x00000000, 0x21212121, 31);
        simple_op_code_test(Opcode::SRL, 0x21212121, 0x21212121, 0xffffffe0);
        simple_op_code_test(Opcode::SRL, 0x10909090, 0x21212121, 0xffffffe1);
        simple_op_code_test(Opcode::SRL, 0x00424242, 0x21212121, 0xffffffe7);
        simple_op_code_test(Opcode::SRL, 0x00008484, 0x21212121, 0xffffffee);
        simple_op_code_test(Opcode::SRL, 0x00000000, 0x21212121, 0xffffffff);

        // SRA: arithmetic right shift (sign-extended).
        simple_op_code_test(Opcode::SRA, 0x00000000, 0x00000000, 0);
        simple_op_code_test(Opcode::SRA, 0xc0000000, 0x80000000, 1);
        simple_op_code_test(Opcode::SRA, 0xff000000, 0x80000000, 7);
        simple_op_code_test(Opcode::SRA, 0xfffe0000, 0x80000000, 14);
        simple_op_code_test(Opcode::SRA, 0xffffffff, 0x80000001, 31);
        simple_op_code_test(Opcode::SRA, 0x7fffffff, 0x7fffffff, 0);
        simple_op_code_test(Opcode::SRA, 0x3fffffff, 0x7fffffff, 1);
        simple_op_code_test(Opcode::SRA, 0x00ffffff, 0x7fffffff, 7);
        simple_op_code_test(Opcode::SRA, 0x0001ffff, 0x7fffffff, 14);
        simple_op_code_test(Opcode::SRA, 0x00000000, 0x7fffffff, 31);
        simple_op_code_test(Opcode::SRA, 0x81818181, 0x81818181, 0);
        simple_op_code_test(Opcode::SRA, 0xc0c0c0c0, 0x81818181, 1);
        simple_op_code_test(Opcode::SRA, 0xff030303, 0x81818181, 7);
        simple_op_code_test(Opcode::SRA, 0xfffe0606, 0x81818181, 14);
        simple_op_code_test(Opcode::SRA, 0xffffffff, 0x81818181, 31);
    }
2959
    #[test]
    #[allow(clippy::unreadable_literal)]
    fn test_simple_memory_program_run() {
        // Checks register results after running the canned memory test
        // program. The expected values below are determined by
        // `simple_memory_program` (defined elsewhere); the groupings are
        // inferred from the values — confirm against that program.
        let program = simple_memory_program();
        let mut runtime = Executor::new(program, SP1CoreOpts::default());
        runtime.run().unwrap();

        // Presumably a full-word load of the stored value 0x12348765.
        assert_eq!(runtime.register(Register::X28), 0x12348765);

        // Presumably byte loads of the four bytes of 0x12348765 (LSB first).
        assert_eq!(runtime.register(Register::X27), 0x65);
        assert_eq!(runtime.register(Register::X26), 0x87);
        assert_eq!(runtime.register(Register::X25), 0x34);
        assert_eq!(runtime.register(Register::X24), 0x12);

        // Presumably signed byte loads: 0x87 sign-extends to 0xffffff87.
        assert_eq!(runtime.register(Register::X23), 0x65);
        assert_eq!(runtime.register(Register::X22), 0xffffff87);

        // Presumably unsigned halfword loads of the low/high halves.
        assert_eq!(runtime.register(Register::X21), 0x8765);
        assert_eq!(runtime.register(Register::X20), 0x1234);

        // Presumably signed halfword loads: 0x8765 sign-extends.
        assert_eq!(runtime.register(Register::X19), 0xffff8765);
        assert_eq!(runtime.register(Register::X18), 0x1234);

        // Presumably reloads after byte stores of 0x25 at successive offsets.
        assert_eq!(runtime.register(Register::X16), 0x12348725);
        assert_eq!(runtime.register(Register::X15), 0x12342525);
        assert_eq!(runtime.register(Register::X14), 0x12252525);
        assert_eq!(runtime.register(Register::X13), 0x25252525);

        // Presumably reloads after halfword stores of 0x6525.
        assert_eq!(runtime.register(Register::X12), 0x12346525);
        assert_eq!(runtime.register(Register::X11), 0x65256525);
    }
2998
2999 #[test]
3000 #[should_panic]
3001 fn test_invalid_address_access_sw() {
3002 let instructions = vec![
3003 Instruction::new(Opcode::ADD, 29, 0, 20, false, true),
3004 Instruction::new(Opcode::SW, 0, 29, 0, false, true),
3005 ];
3006
3007 let program = Program::new(instructions, 0, 0);
3008 let mut runtime = Executor::new(program, SP1CoreOpts::default());
3009 runtime.run().unwrap();
3010 }
3011
3012 #[test]
3013 #[should_panic]
3014 fn test_invalid_address_access_lw() {
3015 let instructions = vec![
3016 Instruction::new(Opcode::ADD, 29, 0, 20, false, true),
3017 Instruction::new(Opcode::LW, 29, 29, 0, false, true),
3018 ];
3019
3020 let program = Program::new(instructions, 0, 0);
3021 let mut runtime = Executor::new(program, SP1CoreOpts::default());
3022 runtime.run().unwrap();
3023 }
3024
3025 #[test]
3026 #[should_panic]
3027 fn test_invalid_address_syscall() {
3028 let instructions = vec![
3029 Instruction::new(Opcode::ADD, 5, 0, SHA_COMPRESS, false, true),
3030 Instruction::new(Opcode::ADD, 10, 0, 10, false, true),
3031 Instruction::new(Opcode::ADD, 11, 10, 20, false, true),
3032 Instruction::new(Opcode::ECALL, 5, 10, 11, false, false),
3033 ];
3034
3035 let program = Program::new(instructions, 0, 0);
3036 let mut runtime = Executor::new(program, SP1CoreOpts::default());
3037 runtime.run().unwrap();
3038 }
3039}