//! Execution record for the core executor (`sp1_core_executor/record.rs`).

1use deepsize2::DeepSizeOf;
2use hashbrown::HashMap;
3use slop_air::AirBuilder;
4use slop_algebra::{AbstractField, Field, PrimeField, PrimeField32};
5use sp1_hypercube::{
6    air::{
7        AirInteraction, BaseAirBuilder, InteractionScope, MachineAir, PublicValues, SP1AirBuilder,
8        PROOF_NONCE_NUM_WORDS, PV_DIGEST_NUM_WORDS, SP1_PROOF_NUM_PV_ELTS,
9    },
10    septic_digest::SepticDigest,
11    shape::Shape,
12    InteractionKind, MachineRecord,
13};
14use std::{
15    borrow::Borrow,
16    iter::once,
17    mem::take,
18    sync::{Arc, Mutex},
19};
20
21use serde::{Deserialize, Serialize};
22
23use crate::{
24    events::{
25        AluEvent, BranchEvent, ByteLookupEvent, ByteRecord, GlobalInteractionEvent,
26        InstructionDecodeEvent, InstructionFetchEvent, JumpEvent, MemInstrEvent,
27        MemoryInitializeFinalizeEvent, MemoryLocalEvent, MemoryRecordEnum,
28        PageProtInitializeFinalizeEvent, PageProtLocalEvent, PrecompileEvent, PrecompileEvents,
29        SyscallEvent, UTypeEvent,
30    },
31    program::Program,
32    ByteOpcode, Instruction, RetainedEventsPreset, RiscvAirId, SplitOpts, SyscallCode,
33};
34
/// A record of the execution of a program.
///
/// The trace of the execution is represented as a list of "events" that occur every cycle.
/// Field order is preserved as-is: the derived `Serialize`/`Deserialize` impls may be used
/// with positional formats, so reordering fields could break serialized data.
#[derive(Clone, Debug, Serialize, Deserialize, Default, DeepSizeOf)]
pub struct ExecutionRecord {
    /// The program.
    pub program: Arc<Program>,
    /// The number of CPU related events.
    pub cpu_event_count: u32,
    /// A trace of the ADD events (ADDI events are recorded separately in `addi_events`).
    pub add_events: Vec<(AluEvent, RTypeRecord)>,
    /// A trace of the ADDW events.
    pub addw_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of the ADDI events.
    pub addi_events: Vec<(AluEvent, ITypeRecord)>,
    /// A trace of the MUL events.
    pub mul_events: Vec<(AluEvent, RTypeRecord)>,
    /// A trace of the SUB events.
    pub sub_events: Vec<(AluEvent, RTypeRecord)>,
    /// A trace of the SUBW events.
    pub subw_events: Vec<(AluEvent, RTypeRecord)>,
    /// A trace of the XOR, XORI, OR, ORI, AND, and ANDI events.
    pub bitwise_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of the SLL and SLLI events.
    pub shift_left_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of the SRL, SRLI, SRA, and SRAI events.
    pub shift_right_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of the DIV, DIVU, REM, and REMU events.
    pub divrem_events: Vec<(AluEvent, RTypeRecord)>,
    /// A trace of the SLT, SLTI, SLTU, and SLTIU events.
    pub lt_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of load byte instructions.
    pub memory_load_byte_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of load half instructions.
    pub memory_load_half_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of load word instructions.
    pub memory_load_word_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of load instructions with `op_a = x0`.
    pub memory_load_x0_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of load double instructions.
    pub memory_load_double_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of store byte instructions.
    pub memory_store_byte_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of store half instructions.
    pub memory_store_half_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of store word instructions.
    pub memory_store_word_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of store double instructions.
    pub memory_store_double_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of the AUIPC and LUI events.
    pub utype_events: Vec<(UTypeEvent, JTypeRecord)>,
    /// A trace of the branch events.
    pub branch_events: Vec<(BranchEvent, ITypeRecord)>,
    /// A trace of the JAL events.
    pub jal_events: Vec<(JumpEvent, JTypeRecord)>,
    /// A trace of the JALR events.
    pub jalr_events: Vec<(JumpEvent, ITypeRecord)>,
    /// A trace of the byte lookups that are needed, keyed by event with a multiplicity count.
    pub byte_lookups: HashMap<ByteLookupEvent, usize>,
    /// A trace of the precompile events.
    pub precompile_events: PrecompileEvents,
    /// A trace of the global memory initialize events.
    pub global_memory_initialize_events: Vec<MemoryInitializeFinalizeEvent>,
    /// A trace of the global memory finalize events.
    pub global_memory_finalize_events: Vec<MemoryInitializeFinalizeEvent>,
    /// A trace of the global page prot initialize events.
    pub global_page_prot_initialize_events: Vec<PageProtInitializeFinalizeEvent>,
    /// A trace of the global page prot finalize events.
    pub global_page_prot_finalize_events: Vec<PageProtInitializeFinalizeEvent>,
    /// A trace of all the shard's local memory events.
    pub cpu_local_memory_access: Vec<MemoryLocalEvent>,
    /// A trace of all the local page prot events.
    pub cpu_local_page_prot_access: Vec<PageProtLocalEvent>,
    /// A trace of all the syscall events.
    pub syscall_events: Vec<(SyscallEvent, RTypeRecord)>,
    /// A trace of all the global interaction events.
    pub global_interaction_events: Vec<GlobalInteractionEvent>,
    /// A trace of all instruction fetch events.
    pub instruction_fetch_events: Vec<(InstructionFetchEvent, MemoryAccessRecord)>,
    /// A trace of all instruction decode events.
    pub instruction_decode_events: Vec<InstructionDecodeEvent>,
    /// The global cumulative sum, shared across threads behind a mutex.
    pub global_cumulative_sum: Arc<Mutex<SepticDigest<u32>>>,
    /// The global interaction event count.
    pub global_interaction_event_count: u32,
    /// Memory records used to bump the timestamp of the register memory access.
    pub bump_memory_events: Vec<(MemoryRecordEnum, u64, bool)>,
    /// Record where the `clk >> 24` or `pc >> 16` has incremented.
    pub bump_state_events: Vec<(u64, u64, bool, u64)>,
    /// The public values.
    pub public_values: PublicValues<u32, u64, u64, u32>,
    /// The next nonce to use for a new lookup.
    pub next_nonce: u64,
    /// The shape of the proof.
    pub shape: Option<Shape<RiscvAirId>>,
    /// The estimated total trace area of the proof.
    pub estimated_trace_area: u64,
    /// The initial timestamp of the shard.
    pub initial_timestamp: u64,
    /// The final timestamp of the shard.
    pub last_timestamp: u64,
    /// The start program counter.
    pub pc_start: Option<u64>,
    /// The final program counter.
    pub next_pc: u64,
    /// The exit code.
    pub exit_code: u32,
    /// Use optimized `generate_dependencies` for global chip.
    pub global_dependencies_opt: bool,
}
145
impl ExecutionRecord {
    /// Create a new [`ExecutionRecord`].
    ///
    /// The record starts from `Default` with the given program, and carries the proof nonce
    /// into its public values so every shard derived from it shares the same nonce.
    #[must_use]
    pub fn new(
        program: Arc<Program>,
        proof_nonce: [u32; PROOF_NONCE_NUM_WORDS],
        global_dependencies_opt: bool,
    ) -> Self {
        let mut result = Self { program, ..Default::default() };
        result.public_values.proof_nonce = proof_nonce;
        result.global_dependencies_opt = global_dependencies_opt;
        result
    }

    /// Create a new [`ExecutionRecord`] with preallocated event vecs.
    ///
    /// Reserves `reservation_size` entries in each hot event vector (and the byte-lookup
    /// map) up front to avoid repeated grow-and-copy while events are recorded.
    #[must_use]
    pub fn new_preallocated(
        program: Arc<Program>,
        proof_nonce: [u32; PROOF_NONCE_NUM_WORDS],
        global_dependencies_opt: bool,
        reservation_size: usize,
    ) -> Self {
        let mut result = Self { program, ..Default::default() };

        result.add_events.reserve(reservation_size);
        result.addi_events.reserve(reservation_size);
        result.addw_events.reserve(reservation_size);
        result.mul_events.reserve(reservation_size);
        result.sub_events.reserve(reservation_size);
        result.subw_events.reserve(reservation_size);
        result.bitwise_events.reserve(reservation_size);
        result.shift_left_events.reserve(reservation_size);
        result.shift_right_events.reserve(reservation_size);
        result.divrem_events.reserve(reservation_size);
        result.lt_events.reserve(reservation_size);
        result.branch_events.reserve(reservation_size);
        result.jal_events.reserve(reservation_size);
        result.jalr_events.reserve(reservation_size);
        result.utype_events.reserve(reservation_size);
        result.memory_load_x0_events.reserve(reservation_size);
        result.memory_load_byte_events.reserve(reservation_size);
        result.memory_load_half_events.reserve(reservation_size);
        result.memory_load_word_events.reserve(reservation_size);
        result.memory_load_double_events.reserve(reservation_size);
        result.memory_store_byte_events.reserve(reservation_size);
        result.memory_store_half_events.reserve(reservation_size);
        result.memory_store_word_events.reserve(reservation_size);
        result.memory_store_double_events.reserve(reservation_size);
        result.global_memory_initialize_events.reserve(reservation_size);
        result.global_memory_finalize_events.reserve(reservation_size);
        result.global_interaction_events.reserve(reservation_size);
        result.byte_lookups.reserve(reservation_size);

        result.public_values.proof_nonce = proof_nonce;
        result.global_dependencies_opt = global_dependencies_opt;
        result
    }

    /// Take out events from the [`ExecutionRecord`] that should be deferred to a separate shard.
    ///
    /// Note: we usually defer events that would increase the recursion cost significantly if
    /// included in every shard.
    #[must_use]
    pub fn defer<'a>(
        &mut self,
        retain_presets: impl IntoIterator<Item = &'a RetainedEventsPreset>,
    ) -> ExecutionRecord {
        let mut execution_record = ExecutionRecord::new(
            self.program.clone(),
            self.public_values.proof_nonce,
            self.global_dependencies_opt,
        );
        // Move ALL precompile events into the deferred record first; retained syscalls
        // are pulled back below.
        execution_record.precompile_events = std::mem::take(&mut self.precompile_events);

        // Take back the events that should be retained: for every syscall code named by a
        // retain preset, remove its event list from the deferred record and re-insert it here.
        self.precompile_events.events.extend(
            retain_presets.into_iter().flat_map(RetainedEventsPreset::syscall_codes).filter_map(
                |code| execution_record.precompile_events.events.remove(code).map(|x| (*code, x)),
            ),
        );

        // Global memory / page-prot init+finalize events are always deferred.
        execution_record.global_memory_initialize_events =
            std::mem::take(&mut self.global_memory_initialize_events);
        execution_record.global_memory_finalize_events =
            std::mem::take(&mut self.global_memory_finalize_events);
        execution_record.global_page_prot_initialize_events =
            std::mem::take(&mut self.global_page_prot_initialize_events);
        execution_record.global_page_prot_finalize_events =
            std::mem::take(&mut self.global_page_prot_finalize_events);
        execution_record
    }

    /// Splits the deferred [`ExecutionRecord`] into multiple [`ExecutionRecord`]s, each which
    /// contain a "reasonable" number of deferred events.
    ///
    /// Precompile events are chunked per syscall code at `opts.syscall_threshold`; when `done`
    /// is false the remainder is kept in `self` for a later call. Memory and page-prot
    /// init/finalize events are only distributed when `done` is true, threading
    /// `previous_*`/`last_*` watermarks through each shard's public values.
    #[allow(clippy::too_many_lines)]
    pub fn split(
        &mut self,
        done: bool,
        last_record: &mut ExecutionRecord,
        can_pack_global_memory: bool,
        opts: &SplitOpts,
    ) -> Vec<ExecutionRecord> {
        let mut shards = Vec::new();

        let precompile_events = take(&mut self.precompile_events);

        for (syscall_code, events) in precompile_events.into_iter() {
            let threshold: usize = opts.syscall_threshold[syscall_code];

            // `chunks_exact` guarantees every chunk is exactly `threshold` long; the tail
            // is handled via `remainder()`.
            let chunks = events.chunks_exact(threshold);
            if done {
                // Last call: flush the undersized remainder as its own shard.
                let remainder = chunks.remainder().to_vec();
                if !remainder.is_empty() {
                    let mut execution_record = ExecutionRecord::new(
                        self.program.clone(),
                        self.public_values.proof_nonce,
                        self.global_dependencies_opt,
                    );
                    execution_record.precompile_events.insert(syscall_code, remainder);
                    execution_record.public_values.update_initialized_state(
                        self.program.pc_start_abs,
                        self.program.enable_untrusted_programs,
                    );
                    shards.push(execution_record);
                }
            } else {
                // Not done yet: keep the remainder in `self` so it can fill a future chunk.
                self.precompile_events.insert(syscall_code, chunks.remainder().to_vec());
            }
            // One shard per full chunk.
            let mut event_shards = chunks
                .map(|chunk| {
                    let mut execution_record = ExecutionRecord::new(
                        self.program.clone(),
                        self.public_values.proof_nonce,
                        self.global_dependencies_opt,
                    );
                    execution_record.precompile_events.insert(syscall_code, chunk.to_vec());
                    execution_record.public_values.update_initialized_state(
                        self.program.pc_start_abs,
                        self.program.enable_untrusted_programs,
                    );
                    execution_record
                })
                .collect::<Vec<_>>();
            shards.append(&mut event_shards);
        }

        if done {
            // If there are no precompile shards, and `last_record` is Some, pack the memory events
            // into the last record.
            let pack_memory_events_into_last_record = can_pack_global_memory && shards.is_empty();
            let mut blank_record = ExecutionRecord::new(
                self.program.clone(),
                self.public_values.proof_nonce,
                self.global_dependencies_opt,
            );

            // Clone the public values of the last record to update the last record's public values.
            let last_record_public_values = last_record.public_values;

            // Update the state of the blank record
            blank_record
                .public_values
                .update_finalized_state_from_public_values(&last_record_public_values);

            // If `last_record` is None, use a blank record to store the memory events.
            let mem_record_ref =
                if pack_memory_events_into_last_record { last_record } else { &mut blank_record };

            // Watermarks threaded through successive shards: each shard's `previous_*` public
            // value is the prior shard's `last_*`.
            let mut init_page_idx = 0;
            let mut finalize_page_idx = 0;

            // Put all of the page prot init and finalize events into the last record.
            if !self.global_page_prot_initialize_events.is_empty()
                || !self.global_page_prot_finalize_events.is_empty()
            {
                // Sorted by page index so the watermark chain is monotone.
                self.global_page_prot_initialize_events.sort_by_key(|event| event.page_idx);
                self.global_page_prot_finalize_events.sort_by_key(|event| event.page_idx);

                let init_iter = self.global_page_prot_initialize_events.iter();
                let finalize_iter = self.global_page_prot_finalize_events.iter();
                let mut init_remaining = init_iter.as_slice();
                let mut finalize_remaining = finalize_iter.as_slice();

                while !init_remaining.is_empty() || !finalize_remaining.is_empty() {
                    // Each shard holds up to `capacity` events total, init events first,
                    // finalize events filling whatever capacity remains.
                    let capacity = 2 * opts.page_prot;
                    let init_to_take = init_remaining.len().min(capacity);
                    let finalize_to_take = finalize_remaining.len().min(capacity - init_to_take);

                    // NOTE(review): this shadowing is a no-op — when `init_to_take < capacity`
                    // the `max` argument equals `finalize_to_take`, and when
                    // `init_to_take == capacity` the line above already yielded 0. Kept as-is.
                    let finalize_to_take = if init_to_take < capacity {
                        finalize_to_take.max(finalize_remaining.len().min(capacity - init_to_take))
                    } else {
                        0
                    };

                    let page_prot_init_chunk = &init_remaining[..init_to_take];
                    let page_prot_finalize_chunk = &finalize_remaining[..finalize_to_take];

                    mem_record_ref
                        .global_page_prot_initialize_events
                        .extend_from_slice(page_prot_init_chunk);
                    mem_record_ref.public_values.previous_init_page_idx = init_page_idx;
                    if let Some(last_event) = page_prot_init_chunk.last() {
                        init_page_idx = last_event.page_idx;
                    }
                    mem_record_ref.public_values.last_init_page_idx = init_page_idx;

                    mem_record_ref
                        .global_page_prot_finalize_events
                        .extend_from_slice(page_prot_finalize_chunk);
                    mem_record_ref.public_values.previous_finalize_page_idx = finalize_page_idx;
                    if let Some(last_event) = page_prot_finalize_chunk.last() {
                        finalize_page_idx = last_event.page_idx;
                    }
                    mem_record_ref.public_values.last_finalize_page_idx = finalize_page_idx;

                    // Because page prot events are non empty, we set the page protect active flag
                    mem_record_ref.public_values.is_untrusted_programs_enabled = true as u32;

                    init_remaining = &init_remaining[init_to_take..];
                    finalize_remaining = &finalize_remaining[finalize_to_take..];

                    // Ensure last record has same proof nonce as other shards
                    mem_record_ref.public_values.proof_nonce = self.public_values.proof_nonce;
                    mem_record_ref.global_dependencies_opt = self.global_dependencies_opt;

                    if !pack_memory_events_into_last_record {
                        // If not packing memory events into the last record, add 'last_record_ref'
                        // to the returned records. `take` replaces `blank_program` with the
                        // default.
                        shards.push(take(mem_record_ref));

                        // Reset the last record so its program is the correct one. (The default
                        // program provided by `take` contains no
                        // instructions.)
                        mem_record_ref.program = self.program.clone();
                        // Reset the public values execution state to match the last record state.
                        mem_record_ref
                            .public_values
                            .update_finalized_state_from_public_values(&last_record_public_values);
                    }
                }
            }

            // Same chunking scheme for global memory init/finalize, keyed by address.
            self.global_memory_initialize_events.sort_by_key(|event| event.addr);
            self.global_memory_finalize_events.sort_by_key(|event| event.addr);

            let mut init_addr = 0;
            let mut finalize_addr = 0;

            let mut mem_init_remaining = self.global_memory_initialize_events.as_slice();
            let mut mem_finalize_remaining = self.global_memory_finalize_events.as_slice();

            while !mem_init_remaining.is_empty() || !mem_finalize_remaining.is_empty() {
                let capacity = 2 * opts.memory;
                let init_to_take = mem_init_remaining.len().min(capacity);
                let finalize_to_take = mem_finalize_remaining.len().min(capacity - init_to_take);

                // NOTE(review): no-op shadowing, same as the page-prot loop above. Kept as-is.
                let finalize_to_take = if init_to_take < capacity {
                    finalize_to_take.max(mem_finalize_remaining.len().min(capacity - init_to_take))
                } else {
                    0
                };

                let mem_init_chunk = &mem_init_remaining[..init_to_take];
                let mem_finalize_chunk = &mem_finalize_remaining[..finalize_to_take];

                mem_record_ref.global_memory_initialize_events.extend_from_slice(mem_init_chunk);
                mem_record_ref.public_values.previous_init_addr = init_addr;
                if let Some(last_event) = mem_init_chunk.last() {
                    init_addr = last_event.addr;
                }
                mem_record_ref.public_values.last_init_addr = init_addr;

                mem_record_ref.global_memory_finalize_events.extend_from_slice(mem_finalize_chunk);
                mem_record_ref.public_values.previous_finalize_addr = finalize_addr;
                if let Some(last_event) = mem_finalize_chunk.last() {
                    finalize_addr = last_event.addr;
                }
                mem_record_ref.public_values.last_finalize_addr = finalize_addr;

                mem_record_ref.public_values.proof_nonce = self.public_values.proof_nonce;
                mem_record_ref.global_dependencies_opt = self.global_dependencies_opt;

                mem_init_remaining = &mem_init_remaining[init_to_take..];
                mem_finalize_remaining = &mem_finalize_remaining[finalize_to_take..];

                if !pack_memory_events_into_last_record {
                    // Carry the final page-prot watermarks into every memory shard.
                    mem_record_ref.public_values.previous_init_page_idx = init_page_idx;
                    mem_record_ref.public_values.last_init_page_idx = init_page_idx;
                    mem_record_ref.public_values.previous_finalize_page_idx = finalize_page_idx;
                    mem_record_ref.public_values.last_finalize_page_idx = finalize_page_idx;

                    // If not packing memory events into the last record, add 'last_record_ref'
                    // to the returned records. `take` replaces `blank_program` with the default.
                    shards.push(take(mem_record_ref));

                    // Reset the last record so its program is the correct one. (The default program
                    // provided by `take` contains no instructions.)
                    mem_record_ref.program = self.program.clone();
                    // Reset the public values execution state to match the last record state.
                    mem_record_ref
                        .public_values
                        .update_finalized_state_from_public_values(&last_record_public_values);
                }
            }
        }

        shards
    }

    /// Return the number of rows needed for a chip, according to the proof shape specified in the
    /// struct.
    ///
    /// **deprecated**: TODO: remove this method. (Always returns `None`.)
    pub fn fixed_log2_rows<F: PrimeField, A: MachineAir<F>>(&self, _air: &A) -> Option<usize> {
        None
    }

    /// Determines whether the execution record contains CPU events.
    #[must_use]
    pub fn contains_cpu(&self) -> bool {
        self.cpu_event_count > 0
    }

    #[inline]
    /// Add a precompile event to the execution record.
    pub fn add_precompile_event(
        &mut self,
        syscall_code: SyscallCode,
        syscall_event: SyscallEvent,
        event: PrecompileEvent,
    ) {
        self.precompile_events.add_event(syscall_code, syscall_event, event);
    }

    /// Get all the precompile events for a syscall code.
    ///
    /// # Panics
    /// Panics if no events were recorded for `syscall_code`.
    #[inline]
    #[must_use]
    pub fn get_precompile_events(
        &self,
        syscall_code: SyscallCode,
    ) -> &Vec<(SyscallEvent, PrecompileEvent)> {
        self.precompile_events.get_events(syscall_code).expect("Precompile events not found")
    }

    /// Get all the local memory events (precompile-local first, then CPU-local).
    #[inline]
    pub fn get_local_mem_events(&self) -> impl Iterator<Item = &MemoryLocalEvent> {
        let precompile_local_mem_events = self.precompile_events.get_local_mem_events();
        precompile_local_mem_events.chain(self.cpu_local_memory_access.iter())
    }

    /// Get all the local page prot events (precompile-local first, then CPU-local).
    #[inline]
    pub fn get_local_page_prot_events(&self) -> impl Iterator<Item = &PageProtLocalEvent> {
        let precompile_local_page_prot_events = self.precompile_events.get_local_page_prot_events();
        precompile_local_page_prot_events.chain(self.cpu_local_page_prot_access.iter())
    }

    /// Reset the record, without deallocating the event vecs.
    ///
    /// `truncate(0)` keeps each vector's capacity so the record can be refilled without
    /// reallocating; `self.program` is intentionally left untouched.
    #[inline]
    pub fn reset(&mut self) {
        self.add_events.truncate(0);
        self.addw_events.truncate(0);
        self.addi_events.truncate(0);
        self.mul_events.truncate(0);
        self.sub_events.truncate(0);
        self.subw_events.truncate(0);
        self.bitwise_events.truncate(0);
        self.shift_left_events.truncate(0);
        self.shift_right_events.truncate(0);
        self.divrem_events.truncate(0);
        self.lt_events.truncate(0);
        self.memory_load_byte_events.truncate(0);
        self.memory_load_half_events.truncate(0);
        self.memory_load_word_events.truncate(0);
        self.memory_load_x0_events.truncate(0);
        self.memory_load_double_events.truncate(0);
        self.memory_store_byte_events.truncate(0);
        self.memory_store_half_events.truncate(0);
        self.memory_store_word_events.truncate(0);
        self.memory_store_double_events.truncate(0);
        self.utype_events.truncate(0);
        self.branch_events.truncate(0);
        self.jal_events.truncate(0);
        self.jalr_events.truncate(0);
        self.byte_lookups.clear();
        // NOTE: this drops the precompile event storage rather than keeping capacity.
        self.precompile_events = PrecompileEvents::default();
        self.global_memory_initialize_events.truncate(0);
        self.global_memory_finalize_events.truncate(0);
        self.global_page_prot_initialize_events.truncate(0);
        self.global_page_prot_finalize_events.truncate(0);
        self.cpu_local_memory_access.truncate(0);
        self.cpu_local_page_prot_access.truncate(0);
        self.syscall_events.truncate(0);
        self.global_interaction_events.truncate(0);
        self.instruction_fetch_events.truncate(0);
        self.instruction_decode_events.truncate(0);
        // Zero the shared cumulative sum in place (the Arc may be shared with other records).
        let mut cumulative_sum = self.global_cumulative_sum.lock().unwrap();
        *cumulative_sum = SepticDigest::default();
        self.global_interaction_event_count = 0;
        self.bump_memory_events.truncate(0);
        self.bump_state_events.truncate(0);
        // NOTE(review): return value of `public_values.reset()` is deliberately discarded;
        // whether it clears `proof_nonce` is not visible here — confirm against PublicValues.
        let _ = self.public_values.reset();
        self.next_nonce = 0;
        self.shape = None;
        self.estimated_trace_area = 0;
        self.initial_timestamp = 0;
        self.last_timestamp = 0;
        self.pc_start = None;
        self.next_pc = 0;
        self.exit_code = 0;
    }
}
560
/// A memory access record for a single instruction: one optional record per operand slot.
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize, DeepSizeOf)]
pub struct MemoryAccessRecord {
    /// The memory access of the `a` register.
    pub a: Option<MemoryRecordEnum>,
    /// The memory access of the `b` register.
    pub b: Option<MemoryRecordEnum>,
    /// The memory access of the `c` register.
    pub c: Option<MemoryRecordEnum>,
    /// The memory access of the `memory` operand (loads/stores).
    pub memory: Option<MemoryRecordEnum>,
    /// The memory access of the untrusted instruction.
    /// If memory access for `untrusted_instruction` occurs, we also pass along the selected 32
    /// bits that is the encoded 32-bit instruction alongside the raw 64-bit read.
    pub untrusted_instruction: Option<(MemoryRecordEnum, u32)>,
}
577
/// Memory record where all three operands are registers.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, DeepSizeOf)]
pub struct RTypeRecord {
    /// The a operand (register index).
    pub op_a: u8,
    /// The register `op_a` record.
    pub a: MemoryRecordEnum,
    /// The b operand.
    pub op_b: u64,
    /// The register `op_b` record.
    pub b: MemoryRecordEnum,
    /// The c operand.
    pub op_c: u64,
    /// The register `op_c` record.
    pub c: MemoryRecordEnum,
    /// Whether the instruction is untrusted (fetched via an untrusted-instruction access).
    pub is_untrusted: bool,
}
596
597impl RTypeRecord {
598    pub(crate) fn new(value: &MemoryAccessRecord, instruction: &Instruction) -> Self {
599        Self {
600            op_a: instruction.op_a,
601            a: value.a.expect("expected MemoryRecord for op_a in RTypeRecord"),
602            op_b: instruction.op_b,
603            b: value.b.expect("expected MemoryRecord for op_b in RTypeRecord"),
604            op_c: instruction.op_c,
605            c: value.c.expect("expected MemoryRecord for op_c in RTypeRecord"),
606            is_untrusted: value.untrusted_instruction.is_some(),
607        }
608    }
609}
/// Memory record where the first two operands are registers.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, DeepSizeOf)]
pub struct ITypeRecord {
    /// The a operand (register index).
    pub op_a: u8,
    /// The register `op_a` record.
    pub a: MemoryRecordEnum,
    /// The b operand.
    pub op_b: u64,
    /// The register `op_b` record.
    pub b: MemoryRecordEnum,
    /// The c operand (immediate; no register record).
    pub op_c: u64,
    /// Whether the instruction is untrusted (fetched via an untrusted-instruction access).
    pub is_untrusted: bool,
}
626
627impl ITypeRecord {
628    pub(crate) fn new(value: &MemoryAccessRecord, instruction: &Instruction) -> Self {
629        debug_assert!(value.c.is_none());
630        Self {
631            op_a: instruction.op_a,
632            a: value.a.expect("expected MemoryRecord for op_a in ITypeRecord"),
633            op_b: instruction.op_b,
634            b: value.b.expect("expected MemoryRecord for op_b in ITypeRecord"),
635            op_c: instruction.op_c,
636            is_untrusted: value.untrusted_instruction.is_some(),
637        }
638    }
639}
640
/// Memory record where only one operand is a register.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, DeepSizeOf)]
pub struct JTypeRecord {
    /// The a operand (register index).
    pub op_a: u8,
    /// The register `op_a` record.
    pub a: MemoryRecordEnum,
    /// The b operand (immediate; no register record).
    pub op_b: u64,
    /// The c operand (immediate; no register record).
    pub op_c: u64,
    /// Whether the instruction is untrusted (fetched via an untrusted-instruction access).
    pub is_untrusted: bool,
}
655
656impl JTypeRecord {
657    pub(crate) fn new(value: &MemoryAccessRecord, instruction: &Instruction) -> Self {
658        debug_assert!(value.b.is_none());
659        debug_assert!(value.c.is_none());
660        Self {
661            op_a: instruction.op_a,
662            a: value.a.expect("expected MemoryRecord for op_a in JTypeRecord"),
663            op_b: instruction.op_b,
664            op_c: instruction.op_c,
665            is_untrusted: value.untrusted_instruction.is_some(),
666        }
667    }
668}
669
/// Memory record where only the first two operands are known to be registers, but the third isn't.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, DeepSizeOf)]
pub struct ALUTypeRecord {
    /// The a operand (register index).
    pub op_a: u8,
    /// The register `op_a` record.
    pub a: MemoryRecordEnum,
    /// The b operand.
    pub op_b: u64,
    /// The register `op_b` record.
    pub b: MemoryRecordEnum,
    /// The c operand.
    pub op_c: u64,
    /// The register `op_c` record (`None` when `op_c` is an immediate).
    pub c: Option<MemoryRecordEnum>,
    /// Whether the instruction has an immediate.
    pub is_imm: bool,
    /// Whether the instruction is untrusted (fetched via an untrusted-instruction access).
    pub is_untrusted: bool,
}
690
691impl ALUTypeRecord {
692    pub(crate) fn new(value: &MemoryAccessRecord, instruction: &Instruction) -> Self {
693        Self {
694            op_a: instruction.op_a,
695            a: value.a.expect("expected MemoryRecord for op_a in ALUTypeRecord"),
696            op_b: instruction.op_b,
697            b: value.b.expect("expected MemoryRecord for op_b in ALUTypeRecord"),
698            op_c: instruction.op_c,
699            c: value.c,
700            is_imm: instruction.imm_c,
701            is_untrusted: value.untrusted_instruction.is_some(),
702        }
703    }
704}
705
/// Memory record for an untrusted program instruction fetch.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct UntrustedProgramInstructionRecord {
    /// The memory access record for the instruction fetch.
    pub memory_access_record: MemoryAccessRecord,
    /// The decoded instruction.
    pub instruction: Instruction,
    /// The encoded (raw 32-bit) instruction.
    pub encoded_instruction: u32,
}
716
717impl MachineRecord for ExecutionRecord {
718    fn stats(&self) -> HashMap<String, usize> {
719        let mut stats = HashMap::new();
720        stats.insert("cpu_events".to_string(), self.cpu_event_count as usize);
721        stats.insert("add_events".to_string(), self.add_events.len());
722        stats.insert("mul_events".to_string(), self.mul_events.len());
723        stats.insert("sub_events".to_string(), self.sub_events.len());
724        stats.insert("bitwise_events".to_string(), self.bitwise_events.len());
725        stats.insert("shift_left_events".to_string(), self.shift_left_events.len());
726        stats.insert("shift_right_events".to_string(), self.shift_right_events.len());
727        stats.insert("divrem_events".to_string(), self.divrem_events.len());
728        stats.insert("lt_events".to_string(), self.lt_events.len());
729        stats.insert("load_byte_events".to_string(), self.memory_load_byte_events.len());
730        stats.insert("load_half_events".to_string(), self.memory_load_half_events.len());
731        stats.insert("load_word_events".to_string(), self.memory_load_word_events.len());
732        stats.insert("load_x0_events".to_string(), self.memory_load_x0_events.len());
733        stats.insert("store_byte_events".to_string(), self.memory_store_byte_events.len());
734        stats.insert("store_half_events".to_string(), self.memory_store_half_events.len());
735        stats.insert("store_word_events".to_string(), self.memory_store_word_events.len());
736        stats.insert("branch_events".to_string(), self.branch_events.len());
737        stats.insert("jal_events".to_string(), self.jal_events.len());
738        stats.insert("jalr_events".to_string(), self.jalr_events.len());
739        stats.insert("utype_events".to_string(), self.utype_events.len());
740        stats.insert("instruction_decode_events".to_string(), self.instruction_decode_events.len());
741        stats.insert("instruction_fetch_events".to_string(), self.instruction_fetch_events.len());
742
743        for (syscall_code, events) in self.precompile_events.iter() {
744            stats.insert(format!("syscall {syscall_code:?}"), events.len());
745        }
746
747        stats.insert(
748            "global_memory_initialize_events".to_string(),
749            self.global_memory_initialize_events.len(),
750        );
751        stats.insert(
752            "global_memory_finalize_events".to_string(),
753            self.global_memory_finalize_events.len(),
754        );
755        stats.insert("local_memory_access_events".to_string(), self.cpu_local_memory_access.len());
756        stats.insert(
757            "local_page_prot_access_events".to_string(),
758            self.cpu_local_page_prot_access.len(),
759        );
760        if self.contains_cpu() {
761            stats.insert("byte_lookups".to_string(), self.byte_lookups.len());
762        }
763        // Filter out the empty events.
764        stats.retain(|_, v| *v != 0);
765        stats
766    }
767
768    fn append(&mut self, other: &mut ExecutionRecord) {
769        self.cpu_event_count += other.cpu_event_count;
770        other.cpu_event_count = 0;
771        self.public_values.global_count += other.public_values.global_count;
772        other.public_values.global_count = 0;
773        self.public_values.global_init_count += other.public_values.global_init_count;
774        other.public_values.global_init_count = 0;
775        self.public_values.global_finalize_count += other.public_values.global_finalize_count;
776        other.public_values.global_finalize_count = 0;
777        self.public_values.global_page_prot_init_count +=
778            other.public_values.global_page_prot_init_count;
779        other.public_values.global_page_prot_init_count = 0;
780        self.public_values.global_page_prot_finalize_count +=
781            other.public_values.global_page_prot_finalize_count;
782        other.public_values.global_page_prot_finalize_count = 0;
783        self.estimated_trace_area += other.estimated_trace_area;
784        other.estimated_trace_area = 0;
785        self.add_events.append(&mut other.add_events);
786        self.sub_events.append(&mut other.sub_events);
787        self.mul_events.append(&mut other.mul_events);
788        self.bitwise_events.append(&mut other.bitwise_events);
789        self.shift_left_events.append(&mut other.shift_left_events);
790        self.shift_right_events.append(&mut other.shift_right_events);
791        self.divrem_events.append(&mut other.divrem_events);
792        self.lt_events.append(&mut other.lt_events);
793        self.memory_load_byte_events.append(&mut other.memory_load_byte_events);
794        self.memory_load_half_events.append(&mut other.memory_load_half_events);
795        self.memory_load_word_events.append(&mut other.memory_load_word_events);
796        self.memory_load_x0_events.append(&mut other.memory_load_x0_events);
797        self.memory_store_byte_events.append(&mut other.memory_store_byte_events);
798        self.memory_store_half_events.append(&mut other.memory_store_half_events);
799        self.memory_store_word_events.append(&mut other.memory_store_word_events);
800        self.branch_events.append(&mut other.branch_events);
801        self.jal_events.append(&mut other.jal_events);
802        self.jalr_events.append(&mut other.jalr_events);
803        self.utype_events.append(&mut other.utype_events);
804        self.syscall_events.append(&mut other.syscall_events);
805        self.bump_memory_events.append(&mut other.bump_memory_events);
806        self.bump_state_events.append(&mut other.bump_state_events);
807        self.precompile_events.append(&mut other.precompile_events);
808        self.instruction_fetch_events.append(&mut other.instruction_fetch_events);
809        self.instruction_decode_events.append(&mut other.instruction_decode_events);
810
811        if self.byte_lookups.is_empty() {
812            self.byte_lookups = std::mem::take(&mut other.byte_lookups);
813        } else {
814            self.add_byte_lookup_events_from_maps(vec![&other.byte_lookups]);
815        }
816
817        self.global_memory_initialize_events.append(&mut other.global_memory_initialize_events);
818        self.global_memory_finalize_events.append(&mut other.global_memory_finalize_events);
819        self.global_page_prot_initialize_events
820            .append(&mut other.global_page_prot_initialize_events);
821        self.global_page_prot_finalize_events.append(&mut other.global_page_prot_finalize_events);
822        self.cpu_local_memory_access.append(&mut other.cpu_local_memory_access);
823        self.cpu_local_page_prot_access.append(&mut other.cpu_local_page_prot_access);
824        self.global_interaction_events.append(&mut other.global_interaction_events);
825    }
826
827    /// Retrieves the public values.  This method is needed for the `MachineRecord` trait, since
828    fn public_values<F: AbstractField>(&self) -> Vec<F> {
829        let mut public_values = self.public_values;
830        public_values.global_cumulative_sum = *self.global_cumulative_sum.lock().unwrap();
831        public_values.to_vec()
832    }
833
834    /// Constrains the public values.
835    #[allow(clippy::type_complexity)]
836    fn eval_public_values<AB: SP1AirBuilder>(builder: &mut AB) {
837        let public_values_slice: [AB::PublicVar; SP1_PROOF_NUM_PV_ELTS] =
838            core::array::from_fn(|i| builder.public_values()[i]);
839        let public_values: &PublicValues<
840            [AB::PublicVar; 4],
841            [AB::PublicVar; 3],
842            [AB::PublicVar; 4],
843            AB::PublicVar,
844        > = public_values_slice.as_slice().borrow();
845
846        for var in public_values.empty {
847            builder.assert_zero(var);
848        }
849
850        Self::eval_state(public_values, builder);
851        Self::eval_first_execution_shard(public_values, builder);
852        Self::eval_exit_code(public_values, builder);
853        Self::eval_committed_value_digest(public_values, builder);
854        Self::eval_deferred_proofs_digest(public_values, builder);
855        Self::eval_global_sum(public_values, builder);
856        Self::eval_global_memory_init(public_values, builder);
857        Self::eval_global_memory_finalize(public_values, builder);
858        Self::eval_global_page_prot_init(public_values, builder);
859        Self::eval_global_page_prot_finalize(public_values, builder);
860    }
861
862    fn interactions_in_public_values() -> Vec<InteractionKind> {
863        InteractionKind::all_kinds()
864            .iter()
865            .filter(|kind| kind.appears_in_eval_public_values())
866            .copied()
867            .collect()
868    }
869}
870
871impl ByteRecord for ExecutionRecord {
872    fn add_byte_lookup_event(&mut self, blu_event: ByteLookupEvent) {
873        *self.byte_lookups.entry(blu_event).or_insert(0) += 1;
874    }
875
876    #[inline]
877    fn add_byte_lookup_events_from_maps(
878        &mut self,
879        new_events: Vec<&HashMap<ByteLookupEvent, usize>>,
880    ) {
881        for new_blu_map in new_events {
882            for (blu_event, count) in new_blu_map.iter() {
883                *self.byte_lookups.entry(*blu_event).or_insert(0) += count;
884            }
885        }
886    }
887}
888
889impl ExecutionRecord {
    /// Constrains the shard's boundary state: timestamps and program counters.
    ///
    /// The timestamp halves are recomposed from four public limbs as
    /// `high = limb[1] + limb[0] * 2^8` and `low = limb[3] + limb[2] * 2^16`. The limbs
    /// are range checked, the initial/final states are sent/received so shards chain, and
    /// non-execution shards are forced to leave the timestamp and pc unchanged.
    #[allow(clippy::type_complexity)]
    fn eval_state<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        // Recompose the high/low halves of the initial and last timestamps from their limbs.
        let initial_timestamp_high = public_values.initial_timestamp[1].into()
            + public_values.initial_timestamp[0].into() * AB::Expr::from_canonical_u32(1 << 8);
        let initial_timestamp_low = public_values.initial_timestamp[3].into()
            + public_values.initial_timestamp[2].into() * AB::Expr::from_canonical_u32(1 << 16);
        let last_timestamp_high = public_values.last_timestamp[1].into()
            + public_values.last_timestamp[0].into() * AB::Expr::from_canonical_u32(1 << 8);
        let last_timestamp_low = public_values.last_timestamp[3].into()
            + public_values.last_timestamp[2].into() * AB::Expr::from_canonical_u32(1 << 16);

        // Range check all the timestamp limbs.
        // `limb[0]` must fit in 16 bits.
        builder.send_byte(
            AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
            public_values.initial_timestamp[0].into(),
            AB::Expr::from_canonical_u32(16),
            AB::Expr::zero(),
            AB::Expr::one(),
        );
        // `(limb[3] - 1) / 8` must fit in 13 bits, which forces `limb[3] == 1 (mod 8)`
        // and `limb[3] < 2^16`.
        builder.send_byte(
            AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
            (public_values.initial_timestamp[3].into() - AB::Expr::one())
                * AB::F::from_canonical_u8(8).inverse(),
            AB::Expr::from_canonical_u32(13),
            AB::Expr::zero(),
            AB::Expr::one(),
        );
        builder.send_byte(
            AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
            public_values.last_timestamp[0].into(),
            AB::Expr::from_canonical_u32(16),
            AB::Expr::zero(),
            AB::Expr::one(),
        );
        builder.send_byte(
            AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
            (public_values.last_timestamp[3].into() - AB::Expr::one())
                * AB::F::from_canonical_u8(8).inverse(),
            AB::Expr::from_canonical_u32(13),
            AB::Expr::zero(),
            AB::Expr::one(),
        );
        // `limb[1]` and `limb[2]` must be bytes; one U8Range lookup checks two values.
        builder.send_byte(
            AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
            AB::Expr::zero(),
            public_values.initial_timestamp[1],
            public_values.initial_timestamp[2],
            AB::Expr::one(),
        );
        builder.send_byte(
            AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
            AB::Expr::zero(),
            public_values.last_timestamp[1],
            public_values.last_timestamp[2],
            AB::Expr::one(),
        );

        // Range check all the initial, final program counter limbs (16 bits each).
        for i in 0..3 {
            builder.send_byte(
                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
                public_values.pc_start[i].into(),
                AB::Expr::from_canonical_u32(16),
                AB::Expr::zero(),
                AB::Expr::one(),
            );
            builder.send_byte(
                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
                public_values.next_pc[i].into(),
                AB::Expr::from_canonical_u32(16),
                AB::Expr::zero(),
                AB::Expr::one(),
            );
        }

        // Send and receive the initial and last state.
        builder.send_state(
            initial_timestamp_high.clone(),
            initial_timestamp_low.clone(),
            public_values.pc_start,
            AB::Expr::one(),
        );
        builder.receive_state(
            last_timestamp_high.clone(),
            last_timestamp_low.clone(),
            public_values.next_pc,
            AB::Expr::one(),
        );

        // If the shard is not execution shard, assert that timestamp and pc remains equal.
        let is_execution_shard = public_values.is_execution_shard.into();
        builder.assert_bool(is_execution_shard.clone());
        builder
            .when_not(is_execution_shard.clone())
            .assert_eq(initial_timestamp_low.clone(), last_timestamp_low.clone());
        builder
            .when_not(is_execution_shard.clone())
            .assert_eq(initial_timestamp_high.clone(), last_timestamp_high.clone());
        builder
            .when_not(is_execution_shard.clone())
            .assert_all_eq(public_values.pc_start, public_values.next_pc);

        // IsZeroOperation on the high bits of the timestamp, using the provided inverse
        // hint `inv_timestamp_high` as the witness.
        builder.assert_bool(public_values.is_timestamp_high_eq);
        // If high bits are equal, then `is_timestamp_high_eq == 1`.
        builder.assert_eq(
            (last_timestamp_high.clone() - initial_timestamp_high.clone())
                * public_values.inv_timestamp_high.into(),
            AB::Expr::one() - public_values.is_timestamp_high_eq.into(),
        );
        // If high bits are distinct, then `is_timestamp_high_eq == 0`.
        builder.assert_zero(
            (last_timestamp_high.clone() - initial_timestamp_high.clone())
                * public_values.is_timestamp_high_eq.into(),
        );

        // IsZeroOperation on the low bits of the timestamp.
        builder.assert_bool(public_values.is_timestamp_low_eq);
        // If low bits are equal, then `is_timestamp_low_eq == 1`.
        builder.assert_eq(
            (last_timestamp_low.clone() - initial_timestamp_low.clone())
                * public_values.inv_timestamp_low.into(),
            AB::Expr::one() - public_values.is_timestamp_low_eq.into(),
        );
        // If low bits are distinct, then `is_timestamp_low_eq == 0`.
        builder.assert_zero(
            (last_timestamp_low.clone() - initial_timestamp_low.clone())
                * public_values.is_timestamp_low_eq.into(),
        );

        // If the shard is an execution shard, then the timestamp is different.
        builder.assert_eq(
            AB::Expr::one() - is_execution_shard.clone(),
            public_values.is_timestamp_high_eq.into() * public_values.is_timestamp_low_eq.into(),
        );

        // Check that an execution shard has `last_timestamp != 1` by providing an inverse.
        // The `high + low` value cannot overflow, as they were range checked to be 24 bits.
        // `high == 1, low == 0` is impossible, as `low == 1 (mod 8)` as checked in `eval_state`.
        builder.when(is_execution_shard.clone()).assert_eq(
            (last_timestamp_high + last_timestamp_low - AB::Expr::one())
                * public_values.last_timestamp_inv.into(),
            AB::Expr::one(),
        );
    }
1043
    /// Constrains the boundary conditions of the (unique) first execution shard.
    ///
    /// Uniqueness of the shard with `is_first_execution_shard == 1` is enforced by the
    /// verifier and in recursion; here we only constrain what must hold on that shard:
    /// the initial timestamp is 1 and all "previous" public values start at zero.
    #[allow(clippy::type_complexity)]
    fn eval_first_execution_shard<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        // Check that `is_first_execution_shard` is boolean.
        builder.assert_bool(public_values.is_first_execution_shard.into());

        // Timestamp constraints.
        //
        // We want to assert that `is_first_execution_shard == 1` corresponds exactly to the unique
        // execution shard with initial timestamp 1. We are assuming that there is a unique
        // shard with `is_first_execution_shard == 1`. This is enforced in the verifier and
        // in recursion. Given thus, it is enough to impose that for this unique shard,
        // `initial_timestamp == 1`.
        builder.when(public_values.is_first_execution_shard.into()).assert_all_eq(
            public_values.initial_timestamp,
            [AB::Expr::zero(), AB::Expr::zero(), AB::Expr::zero(), AB::Expr::one()],
        );

        // If `is_first_execution_shard` is true, check `is_execution_shard == 1`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_one(public_values.is_execution_shard);

        // If `is_first_execution_shard` is true, assert the initial boundary conditions.

        // Check `prev_committed_value_digest == 0`.
        for i in 0..PV_DIGEST_NUM_WORDS {
            builder
                .when(public_values.is_first_execution_shard.into())
                .assert_all_zero(public_values.prev_committed_value_digest[i]);
        }

        // Check `prev_deferred_proofs_digest == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_all_zero(public_values.prev_deferred_proofs_digest);

        // Check `prev_exit_code == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_zero(public_values.prev_exit_code);

        // Check `previous_init_addr == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_all_zero(public_values.previous_init_addr);

        // Check `previous_finalize_addr == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_all_zero(public_values.previous_finalize_addr);

        // Check `previous_init_page_idx == 0`
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_all_zero(public_values.previous_init_page_idx);

        // Check `previous_finalize_page_idx == 0`
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_all_zero(public_values.previous_finalize_page_idx);

        // Check `prev_commit_syscall == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_zero(public_values.prev_commit_syscall);

        // Check `prev_commit_deferred_syscall == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_zero(public_values.prev_commit_deferred_syscall);
    }
1123
    /// Constrains the exit code transition for the shard.
    ///
    /// The exit code may only be set while `prev_exit_code == 0`, and may only change in
    /// an execution shard.
    #[allow(clippy::type_complexity)]
    fn eval_exit_code<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        let is_execution_shard = public_values.is_execution_shard.into();

        // If the `prev_exit_code` is non-zero, then the `exit_code` must be equal to it.
        builder.assert_zero(
            public_values.prev_exit_code.into()
                * (public_values.exit_code.into() - public_values.prev_exit_code.into()),
        );

        // If it's not an execution shard, assert that `exit_code` will not change in that shard.
        builder
            .when_not(is_execution_shard.clone())
            .assert_eq(public_values.prev_exit_code, public_values.exit_code);
    }
1147
    /// Constrains the `committed_value_digest` transition and the `commit_syscall` flag.
    ///
    /// All digest limbs are range checked to be bytes. The digest may only change in an
    /// execution shard, only while it is still all-zero, and only before the commit
    /// syscall has been observed.
    #[allow(clippy::type_complexity)]
    fn eval_committed_value_digest<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        let is_execution_shard = public_values.is_execution_shard.into();

        // Assert that both `prev_committed_value_digest` and `committed_value_digest` are bytes.
        // Each U8Range lookup checks two limbs at once.
        for i in 0..PV_DIGEST_NUM_WORDS {
            builder.send_byte(
                AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
                AB::Expr::zero(),
                public_values.prev_committed_value_digest[i][0],
                public_values.prev_committed_value_digest[i][1],
                AB::Expr::one(),
            );
            builder.send_byte(
                AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
                AB::Expr::zero(),
                public_values.prev_committed_value_digest[i][2],
                public_values.prev_committed_value_digest[i][3],
                AB::Expr::one(),
            );
            builder.send_byte(
                AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
                AB::Expr::zero(),
                public_values.committed_value_digest[i][0],
                public_values.committed_value_digest[i][1],
                AB::Expr::one(),
            );
            builder.send_byte(
                AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
                AB::Expr::zero(),
                public_values.committed_value_digest[i][2],
                public_values.committed_value_digest[i][3],
                AB::Expr::one(),
            );
        }

        // Assert that both `prev_commit_syscall` and `commit_syscall` are boolean.
        builder.assert_bool(public_values.prev_commit_syscall);
        builder.assert_bool(public_values.commit_syscall);

        // Assert that `prev_commit_syscall == 1` implies `commit_syscall == 1`.
        builder.when(public_values.prev_commit_syscall).assert_one(public_values.commit_syscall);

        // Assert that the `commit_syscall` value doesn't change in a non-execution shard.
        builder
            .when_not(is_execution_shard.clone())
            .assert_eq(public_values.prev_commit_syscall, public_values.commit_syscall);

        // Assert that `committed_value_digest` will not change in a non-execution shard.
        for i in 0..PV_DIGEST_NUM_WORDS {
            builder.when_not(is_execution_shard.clone()).assert_all_eq(
                public_values.prev_committed_value_digest[i],
                public_values.committed_value_digest[i],
            );
        }

        // Assert that `prev_committed_value_digest != [0u8; 32]` implies `committed_value_digest`
        // must remain equal to the `prev_committed_value_digest`.
        // Any non-zero limb of the previous digest freezes the entire digest.
        for word in public_values.prev_committed_value_digest {
            for limb in word {
                for i in 0..PV_DIGEST_NUM_WORDS {
                    builder.when(limb).assert_all_eq(
                        public_values.prev_committed_value_digest[i],
                        public_values.committed_value_digest[i],
                    );
                }
            }
        }

        // Assert that if `prev_commit_syscall` is true, `committed_value_digest` doesn't change.
        for i in 0..PV_DIGEST_NUM_WORDS {
            builder.when(public_values.prev_commit_syscall).assert_all_eq(
                public_values.prev_committed_value_digest[i],
                public_values.committed_value_digest[i],
            );
        }
    }
1233
    /// Constrains the `deferred_proofs_digest` transition and the `commit_deferred_syscall`
    /// flag.
    ///
    /// Mirrors `eval_committed_value_digest`: the digest may only change in an execution
    /// shard, only while it is still all-zero, and only before the commit-deferred syscall
    /// has been observed.
    #[allow(clippy::type_complexity)]
    fn eval_deferred_proofs_digest<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        let is_execution_shard = public_values.is_execution_shard.into();

        // Assert that `prev_commit_deferred_syscall` and `commit_deferred_syscall` are boolean.
        builder.assert_bool(public_values.prev_commit_deferred_syscall);
        builder.assert_bool(public_values.commit_deferred_syscall);

        // Assert that `prev_commit_deferred_syscall == 1` implies `commit_deferred_syscall == 1`.
        builder
            .when(public_values.prev_commit_deferred_syscall)
            .assert_one(public_values.commit_deferred_syscall);

        // Assert that the `commit_deferred_syscall` value doesn't change in a non-execution shard.
        builder.when_not(is_execution_shard.clone()).assert_eq(
            public_values.prev_commit_deferred_syscall,
            public_values.commit_deferred_syscall,
        );

        // Assert that `deferred_proofs_digest` will not change in a non-execution shard.
        builder.when_not(is_execution_shard.clone()).assert_all_eq(
            public_values.prev_deferred_proofs_digest,
            public_values.deferred_proofs_digest,
        );

        // Assert that `prev_deferred_proofs_digest != 0` implies `deferred_proofs_digest` must
        // remain equal to the `prev_deferred_proofs_digest`.
        // Any non-zero limb of the previous digest freezes the entire digest.
        for limb in public_values.prev_deferred_proofs_digest {
            builder.when(limb).assert_all_eq(
                public_values.prev_deferred_proofs_digest,
                public_values.deferred_proofs_digest,
            );
        }

        // If `prev_commit_deferred_syscall` is true, `deferred_proofs_digest` doesn't change.
        builder.when(public_values.prev_commit_deferred_syscall).assert_all_eq(
            public_values.prev_deferred_proofs_digest,
            public_values.deferred_proofs_digest,
        );
    }
1282
    /// Constrains the global cumulative sum accumulation chain.
    ///
    /// Sends the zero septic digest at index 0 and receives the claimed cumulative sum at
    /// index `global_count`, both on the `GlobalAccumulation` interaction, so the chain of
    /// accumulation steps must start at zero and end at the claimed sum.
    #[allow(clippy::type_complexity)]
    fn eval_global_sum<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        let initial_sum = SepticDigest::<AB::F>::zero().0;
        // Send (index 0, zero digest): the start of the accumulation chain.
        builder.send(
            AirInteraction::new(
                once(AB::Expr::zero())
                    .chain(initial_sum.x.0.into_iter().map(Into::into))
                    .chain(initial_sum.y.0.into_iter().map(Into::into))
                    .collect(),
                AB::Expr::one(),
                InteractionKind::GlobalAccumulation,
            ),
            InteractionScope::Local,
        );
        // Receive (global_count, claimed sum): the end of the accumulation chain.
        builder.receive(
            AirInteraction::new(
                once(public_values.global_count.into())
                    .chain(public_values.global_cumulative_sum.0.x.0.map(Into::into))
                    .chain(public_values.global_cumulative_sum.0.y.0.map(Into::into))
                    .collect(),
                AB::Expr::one(),
                InteractionKind::GlobalAccumulation,
            ),
            InteractionScope::Local,
        );
    }
1317
    /// Constrains the global memory initialization address chain.
    ///
    /// The previous/last initialization addresses are range checked to 16-bit limbs, then
    /// the previous address is sent at index 0 and the last address is received at index
    /// `global_init_count` on the `MemoryGlobalInitControl` interaction.
    #[allow(clippy::type_complexity)]
    fn eval_global_memory_init<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        // Check the addresses are of valid u16 limbs.
        for i in 0..3 {
            builder.send_byte(
                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
                public_values.previous_init_addr[i].into(),
                AB::Expr::from_canonical_u32(16),
                AB::Expr::zero(),
                AB::Expr::one(),
            );
            builder.send_byte(
                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
                public_values.last_init_addr[i].into(),
                AB::Expr::from_canonical_u32(16),
                AB::Expr::zero(),
                AB::Expr::one(),
            );
        }

        // Send (index 0, previous address): the start of the initialization chain.
        builder.send(
            AirInteraction::new(
                once(AB::Expr::zero())
                    .chain(public_values.previous_init_addr.into_iter().map(Into::into))
                    .chain(once(AB::Expr::one()))
                    .collect(),
                AB::Expr::one(),
                InteractionKind::MemoryGlobalInitControl,
            ),
            InteractionScope::Local,
        );
        // Receive (global_init_count, last address): the end of the initialization chain.
        builder.receive(
            AirInteraction::new(
                once(public_values.global_init_count.into())
                    .chain(public_values.last_init_addr.into_iter().map(Into::into))
                    .chain(once(AB::Expr::one()))
                    .collect(),
                AB::Expr::one(),
                InteractionKind::MemoryGlobalInitControl,
            ),
            InteractionScope::Local,
        );
    }
1369
1370    #[allow(clippy::type_complexity)]
1371    fn eval_global_memory_finalize<AB: SP1AirBuilder>(
1372        public_values: &PublicValues<
1373            [AB::PublicVar; 4],
1374            [AB::PublicVar; 3],
1375            [AB::PublicVar; 4],
1376            AB::PublicVar,
1377        >,
1378        builder: &mut AB,
1379    ) {
1380        // Check the addresses are of valid u16 limbs.
1381        for i in 0..3 {
1382            builder.send_byte(
1383                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
1384                public_values.previous_finalize_addr[i].into(),
1385                AB::Expr::from_canonical_u32(16),
1386                AB::Expr::zero(),
1387                AB::Expr::one(),
1388            );
1389            builder.send_byte(
1390                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
1391                public_values.last_finalize_addr[i].into(),
1392                AB::Expr::from_canonical_u32(16),
1393                AB::Expr::zero(),
1394                AB::Expr::one(),
1395            );
1396        }
1397
1398        builder.send(
1399            AirInteraction::new(
1400                once(AB::Expr::zero())
1401                    .chain(public_values.previous_finalize_addr.into_iter().map(Into::into))
1402                    .chain(once(AB::Expr::one()))
1403                    .collect(),
1404                AB::Expr::one(),
1405                InteractionKind::MemoryGlobalFinalizeControl,
1406            ),
1407            InteractionScope::Local,
1408        );
1409        builder.receive(
1410            AirInteraction::new(
1411                once(public_values.global_finalize_count.into())
1412                    .chain(public_values.last_finalize_addr.into_iter().map(Into::into))
1413                    .chain(once(AB::Expr::one()))
1414                    .collect(),
1415                AB::Expr::one(),
1416                InteractionKind::MemoryGlobalFinalizeControl,
1417            ),
1418            InteractionScope::Local,
1419        );
1420    }
1421
1422    #[allow(clippy::type_complexity)]
1423    fn eval_global_page_prot_init<AB: SP1AirBuilder>(
1424        public_values: &PublicValues<
1425            [AB::PublicVar; 4],
1426            [AB::PublicVar; 3],
1427            [AB::PublicVar; 4],
1428            AB::PublicVar,
1429        >,
1430        builder: &mut AB,
1431    ) {
1432        builder.assert_bool(public_values.is_untrusted_programs_enabled.into());
1433        builder.send(
1434            AirInteraction::new(
1435                once(AB::Expr::zero())
1436                    .chain(public_values.previous_init_page_idx.into_iter().map(Into::into))
1437                    .chain(once(AB::Expr::one()))
1438                    .collect(),
1439                public_values.is_untrusted_programs_enabled.into(),
1440                InteractionKind::PageProtGlobalInitControl,
1441            ),
1442            InteractionScope::Local,
1443        );
1444        builder.receive(
1445            AirInteraction::new(
1446                once(public_values.global_page_prot_init_count.into())
1447                    .chain(public_values.last_init_page_idx.into_iter().map(Into::into))
1448                    .chain(once(AB::Expr::one()))
1449                    .collect(),
1450                public_values.is_untrusted_programs_enabled.into(),
1451                InteractionKind::PageProtGlobalInitControl,
1452            ),
1453            InteractionScope::Local,
1454        );
1455    }
1456
1457    #[allow(clippy::type_complexity)]
1458    fn eval_global_page_prot_finalize<AB: SP1AirBuilder>(
1459        public_values: &PublicValues<
1460            [AB::PublicVar; 4],
1461            [AB::PublicVar; 3],
1462            [AB::PublicVar; 4],
1463            AB::PublicVar,
1464        >,
1465        builder: &mut AB,
1466    ) {
1467        builder.assert_bool(public_values.is_untrusted_programs_enabled.into());
1468        builder.send(
1469            AirInteraction::new(
1470                once(AB::Expr::zero())
1471                    .chain(public_values.previous_finalize_page_idx.into_iter().map(Into::into))
1472                    .chain(once(AB::Expr::one()))
1473                    .collect(),
1474                public_values.is_untrusted_programs_enabled.into(),
1475                InteractionKind::PageProtGlobalFinalizeControl,
1476            ),
1477            InteractionScope::Local,
1478        );
1479        builder.receive(
1480            AirInteraction::new(
1481                once(public_values.global_page_prot_finalize_count.into())
1482                    .chain(public_values.last_finalize_page_idx.into_iter().map(Into::into))
1483                    .chain(once(AB::Expr::one()))
1484                    .collect(),
1485                public_values.is_untrusted_programs_enabled.into(),
1486                InteractionKind::PageProtGlobalFinalizeControl,
1487            ),
1488            InteractionScope::Local,
1489        );
1490    }
1491
    /// Finalize the public values.
    ///
    /// Populates the derived witness fields of `self.public_values` that depend on the
    /// initial/last timestamps: the timestamps are split into a high part (bits 24 and up)
    /// and a 24-bit low part, and field inverses / equality flags over those parts are
    /// precomputed for the verifier's constraints.
    pub fn finalize_public_values<F: PrimeField32>(&mut self, is_execution_shard: bool) {
        let state = &mut self.public_values;
        // Record the flag as a 0/1 public value.
        state.is_execution_shard = is_execution_shard as u32;

        // Split each timestamp into (high = bits 24.., low = low 24 bits).
        let initial_timestamp_high = (state.initial_timestamp >> 24) as u32;
        let initial_timestamp_low = (state.initial_timestamp & 0xFFFFFF) as u32;
        let last_timestamp_high = (state.last_timestamp >> 24) as u32;
        let last_timestamp_low = (state.last_timestamp & 0xFFFFFF) as u32;

        // Inverse of (high + low - 1) over F. When initial_timestamp == 1 the operand is
        // zero and has no inverse, so the witness is set to 0 instead.
        state.initial_timestamp_inv = if state.initial_timestamp == 1 {
            0
        } else {
            F::from_canonical_u32(initial_timestamp_high + initial_timestamp_low - 1)
                .inverse()
                .as_canonical_u32()
        };

        // NOTE(review): no `== 1` guard here, unlike the initial timestamp above —
        // presumably last_timestamp > 1 is an invariant by the time this runs; confirm,
        // since high + low - 1 == 0 would invert zero (and 0 would underflow in u32).
        state.last_timestamp_inv =
            F::from_canonical_u32(last_timestamp_high + last_timestamp_low - 1)
                .inverse()
                .as_canonical_u32();

        // Equality flag for the high parts; when unequal, also store the inverse of the
        // field difference as a witness that they differ. When equal, inv_timestamp_high
        // is left at its prior value (the flag gates its use).
        if initial_timestamp_high == last_timestamp_high {
            state.is_timestamp_high_eq = 1;
        } else {
            state.is_timestamp_high_eq = 0;
            state.inv_timestamp_high = (F::from_canonical_u32(last_timestamp_high)
                - F::from_canonical_u32(initial_timestamp_high))
            .inverse()
            .as_canonical_u32();
        }

        // Same equality/inverse witness pair for the low 24-bit parts.
        if initial_timestamp_low == last_timestamp_low {
            state.is_timestamp_low_eq = 1;
        } else {
            state.is_timestamp_low_eq = 0;
            state.inv_timestamp_low = (F::from_canonical_u32(last_timestamp_low)
                - F::from_canonical_u32(initial_timestamp_low))
            .inverse()
            .as_canonical_u32();
        }
        // A shard whose execution starts at timestamp 1 is the first execution shard.
        state.is_first_execution_shard = (state.initial_timestamp == 1) as u32;
    }
1536}