//! Execution record for the `sp1_core_executor` crate (`record.rs`).

1use deepsize2::DeepSizeOf;
2use hashbrown::HashMap;
3use slop_air::AirBuilder;
4use slop_algebra::{AbstractField, Field, PrimeField, PrimeField32};
5use sp1_hypercube::{
6    air::{
7        AirInteraction, BaseAirBuilder, InteractionScope, MachineAir, PublicValues, SP1AirBuilder,
8        PROOF_NONCE_NUM_WORDS, PV_DIGEST_NUM_WORDS, SP1_PROOF_NUM_PV_ELTS,
9    },
10    septic_digest::SepticDigest,
11    shape::Shape,
12    InteractionKind, MachineRecord,
13};
14use std::{
15    borrow::Borrow,
16    iter::once,
17    mem::take,
18    sync::{Arc, Mutex},
19};
20
21use serde::{Deserialize, Serialize};
22
23use crate::{
24    events::{
25        AluEvent, BranchEvent, ByteLookupEvent, ByteRecord, GlobalInteractionEvent,
26        InstructionDecodeEvent, InstructionFetchEvent, JumpEvent, MemInstrEvent,
27        MemoryInitializeFinalizeEvent, MemoryLocalEvent, MemoryRecordEnum,
28        PageProtInitializeFinalizeEvent, PageProtLocalEvent, PrecompileEvent, PrecompileEvents,
29        SyscallEvent, UTypeEvent,
30    },
31    program::Program,
32    ByteOpcode, Instruction, RetainedEventsPreset, RiscvAirId, SplitOpts, SyscallCode,
33};
34
/// A record of the execution of a program.
///
/// The trace of the execution is represented as a list of "events" that occur every cycle.
#[derive(Clone, Debug, Serialize, Deserialize, Default, DeepSizeOf)]
pub struct ExecutionRecord {
    /// The program.
    pub program: Arc<Program>,
    /// The number of CPU related events.
    pub cpu_event_count: u32,
    /// A trace of ALU events with `rd = x0`.
    pub alu_x0_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of the ADD, and ADDI events.
    pub add_events: Vec<(AluEvent, RTypeRecord)>,
    /// A trace of the ADDW events.
    pub addw_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of the ADDI events.
    pub addi_events: Vec<(AluEvent, ITypeRecord)>,
    /// A trace of the MUL events.
    pub mul_events: Vec<(AluEvent, RTypeRecord)>,
    /// A trace of the SUB events.
    pub sub_events: Vec<(AluEvent, RTypeRecord)>,
    /// A trace of the SUBW events.
    pub subw_events: Vec<(AluEvent, RTypeRecord)>,
    /// A trace of the XOR, XORI, OR, ORI, AND, and ANDI events.
    pub bitwise_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of the SLL and SLLI events.
    pub shift_left_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of the SRL, SRLI, SRA, and SRAI events.
    pub shift_right_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of the DIV, DIVU, REM, and REMU events.
    pub divrem_events: Vec<(AluEvent, RTypeRecord)>,
    /// A trace of the SLT, SLTI, SLTU, and SLTIU events.
    pub lt_events: Vec<(AluEvent, ALUTypeRecord)>,
    /// A trace of load byte instructions.
    pub memory_load_byte_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of load half instructions.
    pub memory_load_half_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of load word instructions.
    pub memory_load_word_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of load instructions with `op_a = x0`.
    pub memory_load_x0_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of load double instructions.
    pub memory_load_double_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of store byte instructions.
    pub memory_store_byte_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of store half instructions.
    pub memory_store_half_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of store word instructions.
    pub memory_store_word_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of store double instructions.
    pub memory_store_double_events: Vec<(MemInstrEvent, ITypeRecord)>,
    /// A trace of the AUIPC and LUI events.
    pub utype_events: Vec<(UTypeEvent, JTypeRecord)>,
    /// A trace of the branch events.
    pub branch_events: Vec<(BranchEvent, ITypeRecord)>,
    /// A trace of the JAL events.
    pub jal_events: Vec<(JumpEvent, JTypeRecord)>,
    /// A trace of the JALR events.
    pub jalr_events: Vec<(JumpEvent, ITypeRecord)>,
    /// A trace of the byte lookups that are needed.
    pub byte_lookups: HashMap<ByteLookupEvent, usize>,
    /// A trace of the precompile events.
    pub precompile_events: PrecompileEvents,
    /// A trace of the global memory initialize events.
    pub global_memory_initialize_events: Vec<MemoryInitializeFinalizeEvent>,
    /// A trace of the global memory finalize events.
    pub global_memory_finalize_events: Vec<MemoryInitializeFinalizeEvent>,
    /// A trace of the global page prot initialize events.
    pub global_page_prot_initialize_events: Vec<PageProtInitializeFinalizeEvent>,
    /// A trace of the global page prot finalize events.
    pub global_page_prot_finalize_events: Vec<PageProtInitializeFinalizeEvent>,
    /// A trace of all the shard's local memory events.
    pub cpu_local_memory_access: Vec<MemoryLocalEvent>,
    /// A trace of all the local page prot events.
    pub cpu_local_page_prot_access: Vec<PageProtLocalEvent>,
    /// A trace of all the syscall events.
    pub syscall_events: Vec<(SyscallEvent, RTypeRecord)>,
    /// A trace of all the global interaction events.
    pub global_interaction_events: Vec<GlobalInteractionEvent>,
    /// A trace of all instruction fetch events.
    pub instruction_fetch_events: Vec<(InstructionFetchEvent, MemoryAccessRecord)>,
    /// A trace of all instruction decode events.
    pub instruction_decode_events: Vec<InstructionDecodeEvent>,
    /// The global cumulative sum.
    pub global_cumulative_sum: Arc<Mutex<SepticDigest<u32>>>,
    /// The global interaction event count.
    pub global_interaction_event_count: u32,
    /// Memory records used to bump the timestamp of the register memory access.
    // NOTE(review): the meaning of the `u64`/`bool` tuple fields is not evident from this
    // file — confirm at the event producer.
    pub bump_memory_events: Vec<(MemoryRecordEnum, u64, bool)>,
    /// Record where the `clk >> 24` or `pc >> 16` has incremented.
    // NOTE(review): tuple layout is not evident from this file — confirm at the producer.
    pub bump_state_events: Vec<(u64, u64, bool, u64)>,
    /// The public values.
    pub public_values: PublicValues<u32, u64, u64, u32>,
    /// The next nonce to use for a new lookup.
    pub next_nonce: u64,
    /// The shape of the proof.
    pub shape: Option<Shape<RiscvAirId>>,
    /// The estimated total trace area of the proof.
    pub estimated_trace_area: u64,
    /// The initial timestamp of the shard.
    pub initial_timestamp: u64,
    /// The final timestamp of the shard.
    pub last_timestamp: u64,
    /// The start program counter.
    pub pc_start: Option<u64>,
    /// The final program counter.
    pub next_pc: u64,
    /// The exit code.
    pub exit_code: u32,
    /// Use optimized `generate_dependencies` for global chip.
    pub global_dependencies_opt: bool,
}
147
148impl ExecutionRecord {
149    /// Create a new [`ExecutionRecord`].
150    #[must_use]
151    pub fn new(
152        program: Arc<Program>,
153        proof_nonce: [u32; PROOF_NONCE_NUM_WORDS],
154        global_dependencies_opt: bool,
155    ) -> Self {
156        let mut result = Self { program, ..Default::default() };
157        result.public_values.proof_nonce = proof_nonce;
158        result.global_dependencies_opt = global_dependencies_opt;
159        result
160    }
161
162    /// Create a new [`ExecutionRecord`] with preallocated event vecs.
163    #[must_use]
164    pub fn new_preallocated(
165        program: Arc<Program>,
166        proof_nonce: [u32; PROOF_NONCE_NUM_WORDS],
167        global_dependencies_opt: bool,
168        reservation_size: usize,
169    ) -> Self {
170        let mut result = Self { program, ..Default::default() };
171
172        result.alu_x0_events.reserve(reservation_size);
173        result.add_events.reserve(reservation_size);
174        result.addi_events.reserve(reservation_size);
175        result.addw_events.reserve(reservation_size);
176        result.mul_events.reserve(reservation_size);
177        result.sub_events.reserve(reservation_size);
178        result.subw_events.reserve(reservation_size);
179        result.bitwise_events.reserve(reservation_size);
180        result.shift_left_events.reserve(reservation_size);
181        result.shift_right_events.reserve(reservation_size);
182        result.divrem_events.reserve(reservation_size);
183        result.lt_events.reserve(reservation_size);
184        result.branch_events.reserve(reservation_size);
185        result.jal_events.reserve(reservation_size);
186        result.jalr_events.reserve(reservation_size);
187        result.utype_events.reserve(reservation_size);
188        result.memory_load_x0_events.reserve(reservation_size);
189        result.memory_load_byte_events.reserve(reservation_size);
190        result.memory_load_half_events.reserve(reservation_size);
191        result.memory_load_word_events.reserve(reservation_size);
192        result.memory_load_double_events.reserve(reservation_size);
193        result.memory_store_byte_events.reserve(reservation_size);
194        result.memory_store_half_events.reserve(reservation_size);
195        result.memory_store_word_events.reserve(reservation_size);
196        result.memory_store_double_events.reserve(reservation_size);
197        result.global_memory_initialize_events.reserve(reservation_size);
198        result.global_memory_finalize_events.reserve(reservation_size);
199        result.global_interaction_events.reserve(reservation_size);
200        result.byte_lookups.reserve(reservation_size);
201
202        result.public_values.proof_nonce = proof_nonce;
203        result.global_dependencies_opt = global_dependencies_opt;
204        result
205    }
206
207    /// Take out events from the [`ExecutionRecord`] that should be deferred to a separate shard.
208    ///
209    /// Note: we usually defer events that would increase the recursion cost significantly if
210    /// included in every shard.
211    #[must_use]
212    pub fn defer<'a>(
213        &mut self,
214        retain_presets: impl IntoIterator<Item = &'a RetainedEventsPreset>,
215    ) -> ExecutionRecord {
216        let mut execution_record = ExecutionRecord::new(
217            self.program.clone(),
218            self.public_values.proof_nonce,
219            self.global_dependencies_opt,
220        );
221        execution_record.precompile_events = std::mem::take(&mut self.precompile_events);
222
223        // Take back the events that should be retained.
224        self.precompile_events.events.extend(
225            retain_presets.into_iter().flat_map(RetainedEventsPreset::syscall_codes).filter_map(
226                |code| execution_record.precompile_events.events.remove(code).map(|x| (*code, x)),
227            ),
228        );
229
230        execution_record.global_memory_initialize_events =
231            std::mem::take(&mut self.global_memory_initialize_events);
232        execution_record.global_memory_finalize_events =
233            std::mem::take(&mut self.global_memory_finalize_events);
234        execution_record.global_page_prot_initialize_events =
235            std::mem::take(&mut self.global_page_prot_initialize_events);
236        execution_record.global_page_prot_finalize_events =
237            std::mem::take(&mut self.global_page_prot_finalize_events);
238        execution_record
239    }
240
    /// Splits the deferred [`ExecutionRecord`] into multiple [`ExecutionRecord`]s, each which
    /// contain a "reasonable" number of deferred events.
    ///
    /// Precompile events are chunked per syscall using `opts.syscall_threshold`. When `done`,
    /// the leftover (sub-threshold) chunk becomes its own shard; otherwise it is put back into
    /// `self` for a later call. When `done`, global memory and page-prot init/finalize events
    /// are also packed into shards — or directly into `last_record` when
    /// `can_pack_global_memory` holds and no precompile shard was produced.
    #[allow(clippy::too_many_lines)]
    pub fn split(
        &mut self,
        done: bool,
        last_record: &mut ExecutionRecord,
        can_pack_global_memory: bool,
        opts: &SplitOpts,
    ) -> Vec<ExecutionRecord> {
        let mut shards = Vec::new();

        let precompile_events = take(&mut self.precompile_events);

        for (syscall_code, events) in precompile_events.into_iter() {
            // Maximum number of events of this syscall per shard.
            let threshold: usize = opts.syscall_threshold[syscall_code];

            let chunks = events.chunks_exact(threshold);
            if done {
                // Final call: the remainder also has to be proven, as its own shard.
                let remainder = chunks.remainder().to_vec();
                if !remainder.is_empty() {
                    let mut execution_record = ExecutionRecord::new(
                        self.program.clone(),
                        self.public_values.proof_nonce,
                        self.global_dependencies_opt,
                    );
                    execution_record.precompile_events.insert(syscall_code, remainder);
                    execution_record.public_values.update_initialized_state(
                        self.program.pc_start_abs,
                        self.program.enable_untrusted_programs,
                    );
                    shards.push(execution_record);
                }
            } else {
                // Not done yet: keep the remainder in `self` so a later call can merge it
                // with more events of the same syscall.
                self.precompile_events.insert(syscall_code, chunks.remainder().to_vec());
            }
            // Each full chunk becomes one shard.
            let mut event_shards = chunks
                .map(|chunk| {
                    let mut execution_record = ExecutionRecord::new(
                        self.program.clone(),
                        self.public_values.proof_nonce,
                        self.global_dependencies_opt,
                    );
                    execution_record.precompile_events.insert(syscall_code, chunk.to_vec());
                    execution_record.public_values.update_initialized_state(
                        self.program.pc_start_abs,
                        self.program.enable_untrusted_programs,
                    );
                    execution_record
                })
                .collect::<Vec<_>>();
            shards.append(&mut event_shards);
        }

        if done {
            // If there are no precompile shards, and `last_record` is Some, pack the memory events
            // into the last record.
            let pack_memory_events_into_last_record = can_pack_global_memory && shards.is_empty();
            let mut blank_record = ExecutionRecord::new(
                self.program.clone(),
                self.public_values.proof_nonce,
                self.global_dependencies_opt,
            );

            // Clone the public values of the last record to update the last record's public values.
            let last_record_public_values = last_record.public_values;

            // Update the state of the blank record
            blank_record
                .public_values
                .update_finalized_state_from_public_values(&last_record_public_values);

            // If `last_record` is None, use a blank record to store the memory events.
            let mem_record_ref =
                if pack_memory_events_into_last_record { last_record } else { &mut blank_record };

            // Running watermarks carried between chunks so each shard's public values chain
            // together (previous_* = where the prior shard stopped).
            let mut init_page_idx = 0;
            let mut finalize_page_idx = 0;

            // Put all of the page prot init and finalize events into the last record.
            if !self.global_page_prot_initialize_events.is_empty()
                || !self.global_page_prot_finalize_events.is_empty()
            {
                self.global_page_prot_initialize_events.sort_by_key(|event| event.page_idx);
                self.global_page_prot_finalize_events.sort_by_key(|event| event.page_idx);

                let init_iter = self.global_page_prot_initialize_events.iter();
                let finalize_iter = self.global_page_prot_finalize_events.iter();
                let mut init_remaining = init_iter.as_slice();
                let mut finalize_remaining = finalize_iter.as_slice();

                while !init_remaining.is_empty() || !finalize_remaining.is_empty() {
                    // A shard holds up to `capacity` events total; init events are taken
                    // first, finalize events fill whatever space is left.
                    let capacity = 2 * opts.page_prot;
                    let init_to_take = init_remaining.len().min(capacity);
                    let finalize_to_take = finalize_remaining.len().min(capacity - init_to_take);

                    // NOTE(review): when `init_to_take < capacity` this recomputes the same
                    // `min` as above, so the `max` is a no-op; the branch only matters for
                    // forcing 0 when init fills the shard (already 0 via `capacity -
                    // init_to_take == 0`). Looks simplifiable — confirm before touching.
                    let finalize_to_take = if init_to_take < capacity {
                        finalize_to_take.max(finalize_remaining.len().min(capacity - init_to_take))
                    } else {
                        0
                    };

                    let page_prot_init_chunk = &init_remaining[..init_to_take];
                    let page_prot_finalize_chunk = &finalize_remaining[..finalize_to_take];

                    mem_record_ref
                        .global_page_prot_initialize_events
                        .extend_from_slice(page_prot_init_chunk);
                    mem_record_ref.public_values.previous_init_page_idx = init_page_idx;
                    if let Some(last_event) = page_prot_init_chunk.last() {
                        init_page_idx = last_event.page_idx;
                    }
                    mem_record_ref.public_values.last_init_page_idx = init_page_idx;

                    mem_record_ref
                        .global_page_prot_finalize_events
                        .extend_from_slice(page_prot_finalize_chunk);
                    mem_record_ref.public_values.previous_finalize_page_idx = finalize_page_idx;
                    if let Some(last_event) = page_prot_finalize_chunk.last() {
                        finalize_page_idx = last_event.page_idx;
                    }
                    mem_record_ref.public_values.last_finalize_page_idx = finalize_page_idx;

                    // Because page prot events are non empty, we set the page protect active flag
                    mem_record_ref.public_values.is_untrusted_programs_enabled = true as u32;

                    init_remaining = &init_remaining[init_to_take..];
                    finalize_remaining = &finalize_remaining[finalize_to_take..];

                    // Ensure last record has same proof nonce as other shards
                    mem_record_ref.public_values.proof_nonce = self.public_values.proof_nonce;
                    mem_record_ref.global_dependencies_opt = self.global_dependencies_opt;

                    if !pack_memory_events_into_last_record {
                        // If not packing memory events into the last record, add 'last_record_ref'
                        // to the returned records. `take` replaces `blank_program` with the
                        // default.
                        shards.push(take(mem_record_ref));

                        // Reset the last record so its program is the correct one. (The default
                        // program provided by `take` contains no
                        // instructions.)
                        mem_record_ref.program = self.program.clone();
                        // Reset the public values execution state to match the last record state.
                        mem_record_ref
                            .public_values
                            .update_finalized_state_from_public_values(&last_record_public_values);
                    }
                }
            }

            // Same chunking scheme for global memory init/finalize events, keyed by address.
            self.global_memory_initialize_events.sort_by_key(|event| event.addr);
            self.global_memory_finalize_events.sort_by_key(|event| event.addr);

            let mut init_addr = 0;
            let mut finalize_addr = 0;

            let mut mem_init_remaining = self.global_memory_initialize_events.as_slice();
            let mut mem_finalize_remaining = self.global_memory_finalize_events.as_slice();

            while !mem_init_remaining.is_empty() || !mem_finalize_remaining.is_empty() {
                let capacity = 2 * opts.memory;
                let init_to_take = mem_init_remaining.len().min(capacity);
                let finalize_to_take = mem_finalize_remaining.len().min(capacity - init_to_take);

                // NOTE(review): same redundant recomputation as the page-prot loop above.
                let finalize_to_take = if init_to_take < capacity {
                    finalize_to_take.max(mem_finalize_remaining.len().min(capacity - init_to_take))
                } else {
                    0
                };

                let mem_init_chunk = &mem_init_remaining[..init_to_take];
                let mem_finalize_chunk = &mem_finalize_remaining[..finalize_to_take];

                mem_record_ref.global_memory_initialize_events.extend_from_slice(mem_init_chunk);
                mem_record_ref.public_values.previous_init_addr = init_addr;
                if let Some(last_event) = mem_init_chunk.last() {
                    init_addr = last_event.addr;
                }
                mem_record_ref.public_values.last_init_addr = init_addr;

                mem_record_ref.global_memory_finalize_events.extend_from_slice(mem_finalize_chunk);
                mem_record_ref.public_values.previous_finalize_addr = finalize_addr;
                if let Some(last_event) = mem_finalize_chunk.last() {
                    finalize_addr = last_event.addr;
                }
                mem_record_ref.public_values.last_finalize_addr = finalize_addr;

                mem_record_ref.public_values.proof_nonce = self.public_values.proof_nonce;
                mem_record_ref.global_dependencies_opt = self.global_dependencies_opt;

                mem_init_remaining = &mem_init_remaining[init_to_take..];
                mem_finalize_remaining = &mem_finalize_remaining[finalize_to_take..];

                if !pack_memory_events_into_last_record {
                    // Carry the final page-idx watermarks into every memory shard.
                    mem_record_ref.public_values.previous_init_page_idx = init_page_idx;
                    mem_record_ref.public_values.last_init_page_idx = init_page_idx;
                    mem_record_ref.public_values.previous_finalize_page_idx = finalize_page_idx;
                    mem_record_ref.public_values.last_finalize_page_idx = finalize_page_idx;

                    // If not packing memory events into the last record, add 'last_record_ref'
                    // to the returned records. `take` replaces `blank_program` with the default.
                    shards.push(take(mem_record_ref));

                    // Reset the last record so its program is the correct one. (The default program
                    // provided by `take` contains no instructions.)
                    mem_record_ref.program = self.program.clone();
                    // Reset the public values execution state to match the last record state.
                    mem_record_ref
                        .public_values
                        .update_finalized_state_from_public_values(&last_record_public_values);
                }
            }
        }

        shards
    }
458
    /// Return the number of rows needed for a chip, according to the proof shape specified in the
    /// struct.
    ///
    /// **deprecated**: TODO: remove this method.
    ///
    /// Always returns `None`; shapes are no longer resolved through this method.
    pub fn fixed_log2_rows<F: PrimeField, A: MachineAir<F>>(&self, _air: &A) -> Option<usize> {
        None
    }
466
467    /// Determines whether the execution record contains CPU events.
468    #[must_use]
469    pub fn contains_cpu(&self) -> bool {
470        self.cpu_event_count > 0
471    }
472
    /// Add a precompile event to the execution record.
    ///
    /// Delegates to the [`PrecompileEvents`] container, storing the triggering syscall event
    /// together with its precompile payload under `syscall_code`.
    #[inline]
    pub fn add_precompile_event(
        &mut self,
        syscall_code: SyscallCode,
        syscall_event: SyscallEvent,
        event: PrecompileEvent,
    ) {
        self.precompile_events.add_event(syscall_code, syscall_event, event);
    }
483
    /// Get all the precompile events for a syscall code.
    ///
    /// # Panics
    ///
    /// Panics if no events were recorded for `syscall_code`.
    #[inline]
    #[must_use]
    pub fn get_precompile_events(
        &self,
        syscall_code: SyscallCode,
    ) -> &Vec<(SyscallEvent, PrecompileEvent)> {
        self.precompile_events.get_events(syscall_code).expect("Precompile events not found")
    }
493
494    /// Get all the local memory events.
495    #[inline]
496    pub fn get_local_mem_events(&self) -> impl Iterator<Item = &MemoryLocalEvent> {
497        let precompile_local_mem_events = self.precompile_events.get_local_mem_events();
498        precompile_local_mem_events.chain(self.cpu_local_memory_access.iter())
499    }
500
501    /// Get all the local page prot events.
502    #[inline]
503    pub fn get_local_page_prot_events(&self) -> impl Iterator<Item = &PageProtLocalEvent> {
504        let precompile_local_page_prot_events = self.precompile_events.get_local_page_prot_events();
505        precompile_local_page_prot_events.chain(self.cpu_local_page_prot_access.iter())
506    }
507
508    /// Reset the record, without deallocating the event vecs.
509    #[inline]
510    pub fn reset(&mut self) {
511        self.alu_x0_events.truncate(0);
512        self.add_events.truncate(0);
513        self.addw_events.truncate(0);
514        self.addi_events.truncate(0);
515        self.mul_events.truncate(0);
516        self.sub_events.truncate(0);
517        self.subw_events.truncate(0);
518        self.bitwise_events.truncate(0);
519        self.shift_left_events.truncate(0);
520        self.shift_right_events.truncate(0);
521        self.divrem_events.truncate(0);
522        self.lt_events.truncate(0);
523        self.memory_load_byte_events.truncate(0);
524        self.memory_load_half_events.truncate(0);
525        self.memory_load_word_events.truncate(0);
526        self.memory_load_x0_events.truncate(0);
527        self.memory_load_double_events.truncate(0);
528        self.memory_store_byte_events.truncate(0);
529        self.memory_store_half_events.truncate(0);
530        self.memory_store_word_events.truncate(0);
531        self.memory_store_double_events.truncate(0);
532        self.utype_events.truncate(0);
533        self.branch_events.truncate(0);
534        self.jal_events.truncate(0);
535        self.jalr_events.truncate(0);
536        self.byte_lookups.clear();
537        self.precompile_events = PrecompileEvents::default();
538        self.global_memory_initialize_events.truncate(0);
539        self.global_memory_finalize_events.truncate(0);
540        self.global_page_prot_initialize_events.truncate(0);
541        self.global_page_prot_finalize_events.truncate(0);
542        self.cpu_local_memory_access.truncate(0);
543        self.cpu_local_page_prot_access.truncate(0);
544        self.syscall_events.truncate(0);
545        self.global_interaction_events.truncate(0);
546        self.instruction_fetch_events.truncate(0);
547        self.instruction_decode_events.truncate(0);
548        let mut cumulative_sum = self.global_cumulative_sum.lock().unwrap();
549        *cumulative_sum = SepticDigest::default();
550        self.global_interaction_event_count = 0;
551        self.bump_memory_events.truncate(0);
552        self.bump_state_events.truncate(0);
553        let _ = self.public_values.reset();
554        self.next_nonce = 0;
555        self.shape = None;
556        self.estimated_trace_area = 0;
557        self.initial_timestamp = 0;
558        self.last_timestamp = 0;
559        self.pc_start = None;
560        self.next_pc = 0;
561        self.exit_code = 0;
562    }
563}
564
/// A memory access record.
///
/// Bundles the optional register/memory accesses performed by a single instruction.
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize, DeepSizeOf)]
pub struct MemoryAccessRecord {
    /// The memory access of the `a` register.
    pub a: Option<MemoryRecordEnum>,
    /// The memory access of the `b` register.
    pub b: Option<MemoryRecordEnum>,
    /// The memory access of the `c` register.
    pub c: Option<MemoryRecordEnum>,
    /// The memory access of the memory operand (presumably the load/store data access —
    /// confirm at the executor).
    pub memory: Option<MemoryRecordEnum>,
    /// The memory access of the untrusted instruction.
    /// If memory access for `untrusted_instruction` occurs, we also pass along the selected 32
    /// bits that is the encoded 32 bit instruction alongside the raw 64bit read
    pub untrusted_instruction: Option<(MemoryRecordEnum, u32)>,
}
581
/// Memory record where all three operands are registers.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, DeepSizeOf)]
pub struct RTypeRecord {
    /// The a operand (register index).
    pub op_a: u8,
    /// The register `op_a` record.
    pub a: MemoryRecordEnum,
    /// The b operand, copied from the instruction.
    pub op_b: u64,
    /// The register `op_b` record.
    pub b: MemoryRecordEnum,
    /// The c operand, copied from the instruction.
    pub op_c: u64,
    /// The register `op_c` record.
    pub c: MemoryRecordEnum,
    /// Whether the instruction is untrusted.
    pub is_untrusted: bool,
}
600
601impl RTypeRecord {
602    pub(crate) fn new(value: &MemoryAccessRecord, instruction: &Instruction) -> Self {
603        Self {
604            op_a: instruction.op_a,
605            a: value.a.expect("expected MemoryRecord for op_a in RTypeRecord"),
606            op_b: instruction.op_b,
607            b: value.b.expect("expected MemoryRecord for op_b in RTypeRecord"),
608            op_c: instruction.op_c,
609            c: value.c.expect("expected MemoryRecord for op_c in RTypeRecord"),
610            is_untrusted: value.untrusted_instruction.is_some(),
611        }
612    }
613}
/// Memory record where the first two operands are registers.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, DeepSizeOf)]
pub struct ITypeRecord {
    /// The a operand (register index).
    pub op_a: u8,
    /// The register `op_a` record.
    pub a: MemoryRecordEnum,
    /// The b operand, copied from the instruction.
    pub op_b: u64,
    /// The register `op_b` record.
    pub b: MemoryRecordEnum,
    /// The c operand, copied from the instruction (no register record: not a register).
    pub op_c: u64,
    /// Whether the instruction is untrusted.
    pub is_untrusted: bool,
}
630
631impl ITypeRecord {
632    pub(crate) fn new(value: &MemoryAccessRecord, instruction: &Instruction) -> Self {
633        debug_assert!(value.c.is_none());
634        Self {
635            op_a: instruction.op_a,
636            a: value.a.expect("expected MemoryRecord for op_a in ITypeRecord"),
637            op_b: instruction.op_b,
638            b: value.b.expect("expected MemoryRecord for op_b in ITypeRecord"),
639            op_c: instruction.op_c,
640            is_untrusted: value.untrusted_instruction.is_some(),
641        }
642    }
643}
644
/// Memory record where only one operand is a register.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, DeepSizeOf)]
pub struct JTypeRecord {
    /// The a operand (register index).
    pub op_a: u8,
    /// The register `op_a` record.
    pub a: MemoryRecordEnum,
    /// The b operand, copied from the instruction (no register record: not a register).
    pub op_b: u64,
    /// The c operand, copied from the instruction (no register record: not a register).
    pub op_c: u64,
    /// Whether the instruction is untrusted.
    pub is_untrusted: bool,
}
659
660impl JTypeRecord {
661    pub(crate) fn new(value: &MemoryAccessRecord, instruction: &Instruction) -> Self {
662        debug_assert!(value.b.is_none());
663        debug_assert!(value.c.is_none());
664        Self {
665            op_a: instruction.op_a,
666            a: value.a.expect("expected MemoryRecord for op_a in JTypeRecord"),
667            op_b: instruction.op_b,
668            op_c: instruction.op_c,
669            is_untrusted: value.untrusted_instruction.is_some(),
670        }
671    }
672}
673
/// Memory record where only the first two operands are known to be registers, but the third isn't.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, DeepSizeOf)]
pub struct ALUTypeRecord {
    /// The a operand (register index).
    pub op_a: u8,
    /// The register `op_a` record.
    pub a: MemoryRecordEnum,
    /// The b operand, copied from the instruction.
    pub op_b: u64,
    /// The register `op_b` record.
    pub b: MemoryRecordEnum,
    /// The c operand, copied from the instruction.
    pub op_c: u64,
    /// The register `op_c` record; `None` when `op_c` was an immediate.
    pub c: Option<MemoryRecordEnum>,
    /// Whether the instruction has an immediate.
    pub is_imm: bool,
    /// Whether the instruction is untrusted.
    pub is_untrusted: bool,
}
694
695impl ALUTypeRecord {
696    pub(crate) fn new(value: &MemoryAccessRecord, instruction: &Instruction) -> Self {
697        Self {
698            op_a: instruction.op_a,
699            a: value.a.expect("expected MemoryRecord for op_a in ALUTypeRecord"),
700            op_b: instruction.op_b,
701            b: value.b.expect("expected MemoryRecord for op_b in ALUTypeRecord"),
702            op_c: instruction.op_c,
703            c: value.c,
704            is_imm: instruction.imm_c,
705            is_untrusted: value.untrusted_instruction.is_some(),
706        }
707    }
708}
709
/// Memory record for an untrusted program instruction fetch.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct UntrustedProgramInstructionRecord {
    /// The memory accesses recorded for this instruction.
    pub memory_access_record: MemoryAccessRecord,
    /// The decoded instruction.
    pub instruction: Instruction,
    /// The encoded (raw) instruction word.
    pub encoded_instruction: u32,
}
720
impl MachineRecord for ExecutionRecord {
    /// Returns a map of event counts keyed by a human-readable name, with all
    /// zero-count entries removed.
    fn stats(&self) -> HashMap<String, usize> {
        let mut stats = HashMap::new();
        stats.insert("cpu_events".to_string(), self.cpu_event_count as usize);
        stats.insert("alu_x0_events".to_string(), self.alu_x0_events.len());
        stats.insert("add_events".to_string(), self.add_events.len());
        stats.insert("mul_events".to_string(), self.mul_events.len());
        stats.insert("sub_events".to_string(), self.sub_events.len());
        stats.insert("bitwise_events".to_string(), self.bitwise_events.len());
        stats.insert("shift_left_events".to_string(), self.shift_left_events.len());
        stats.insert("shift_right_events".to_string(), self.shift_right_events.len());
        stats.insert("divrem_events".to_string(), self.divrem_events.len());
        stats.insert("lt_events".to_string(), self.lt_events.len());
        stats.insert("load_byte_events".to_string(), self.memory_load_byte_events.len());
        stats.insert("load_half_events".to_string(), self.memory_load_half_events.len());
        stats.insert("load_word_events".to_string(), self.memory_load_word_events.len());
        stats.insert("load_x0_events".to_string(), self.memory_load_x0_events.len());
        stats.insert("store_byte_events".to_string(), self.memory_store_byte_events.len());
        stats.insert("store_half_events".to_string(), self.memory_store_half_events.len());
        stats.insert("store_word_events".to_string(), self.memory_store_word_events.len());
        stats.insert("branch_events".to_string(), self.branch_events.len());
        stats.insert("jal_events".to_string(), self.jal_events.len());
        stats.insert("jalr_events".to_string(), self.jalr_events.len());
        stats.insert("utype_events".to_string(), self.utype_events.len());
        stats.insert("instruction_decode_events".to_string(), self.instruction_decode_events.len());
        stats.insert("instruction_fetch_events".to_string(), self.instruction_fetch_events.len());

        // One entry per syscall code seen in the precompile events.
        for (syscall_code, events) in self.precompile_events.iter() {
            stats.insert(format!("syscall {syscall_code:?}"), events.len());
        }

        stats.insert(
            "global_memory_initialize_events".to_string(),
            self.global_memory_initialize_events.len(),
        );
        stats.insert(
            "global_memory_finalize_events".to_string(),
            self.global_memory_finalize_events.len(),
        );
        stats.insert("local_memory_access_events".to_string(), self.cpu_local_memory_access.len());
        stats.insert(
            "local_page_prot_access_events".to_string(),
            self.cpu_local_page_prot_access.len(),
        );
        // Byte lookups are only meaningful when the record contains CPU activity.
        if self.contains_cpu() {
            stats.insert("byte_lookups".to_string(), self.byte_lookups.len());
        }
        // Filter out the empty events.
        stats.retain(|_, v| *v != 0);
        stats
    }

    /// Drains all events, counters, and public-value counts from `other` into `self`,
    /// leaving `other` with zeroed counters and empty event vectors.
    fn append(&mut self, other: &mut ExecutionRecord) {
        // Scalar counters are accumulated and then reset on `other` so the totals are
        // not double-counted if `other` is appended elsewhere afterwards.
        self.cpu_event_count += other.cpu_event_count;
        other.cpu_event_count = 0;
        self.public_values.global_count += other.public_values.global_count;
        other.public_values.global_count = 0;
        self.public_values.global_init_count += other.public_values.global_init_count;
        other.public_values.global_init_count = 0;
        self.public_values.global_finalize_count += other.public_values.global_finalize_count;
        other.public_values.global_finalize_count = 0;
        self.public_values.global_page_prot_init_count +=
            other.public_values.global_page_prot_init_count;
        other.public_values.global_page_prot_init_count = 0;
        self.public_values.global_page_prot_finalize_count +=
            other.public_values.global_page_prot_finalize_count;
        other.public_values.global_page_prot_finalize_count = 0;
        self.estimated_trace_area += other.estimated_trace_area;
        other.estimated_trace_area = 0;
        // Event vectors are moved over wholesale (`Vec::append` empties `other`).
        self.alu_x0_events.append(&mut other.alu_x0_events);
        self.add_events.append(&mut other.add_events);
        self.sub_events.append(&mut other.sub_events);
        self.mul_events.append(&mut other.mul_events);
        self.bitwise_events.append(&mut other.bitwise_events);
        self.shift_left_events.append(&mut other.shift_left_events);
        self.shift_right_events.append(&mut other.shift_right_events);
        self.divrem_events.append(&mut other.divrem_events);
        self.lt_events.append(&mut other.lt_events);
        self.memory_load_byte_events.append(&mut other.memory_load_byte_events);
        self.memory_load_half_events.append(&mut other.memory_load_half_events);
        self.memory_load_word_events.append(&mut other.memory_load_word_events);
        self.memory_load_x0_events.append(&mut other.memory_load_x0_events);
        self.memory_store_byte_events.append(&mut other.memory_store_byte_events);
        self.memory_store_half_events.append(&mut other.memory_store_half_events);
        self.memory_store_word_events.append(&mut other.memory_store_word_events);
        self.branch_events.append(&mut other.branch_events);
        self.jal_events.append(&mut other.jal_events);
        self.jalr_events.append(&mut other.jalr_events);
        self.utype_events.append(&mut other.utype_events);
        self.syscall_events.append(&mut other.syscall_events);
        self.bump_memory_events.append(&mut other.bump_memory_events);
        self.bump_state_events.append(&mut other.bump_state_events);
        self.precompile_events.append(&mut other.precompile_events);
        self.instruction_fetch_events.append(&mut other.instruction_fetch_events);
        self.instruction_decode_events.append(&mut other.instruction_decode_events);

        // Fast path: if we have no byte lookups yet, steal `other`'s map instead of
        // merging entry by entry.
        if self.byte_lookups.is_empty() {
            self.byte_lookups = std::mem::take(&mut other.byte_lookups);
        } else {
            self.add_byte_lookup_events_from_maps(vec![&other.byte_lookups]);
        }

        self.global_memory_initialize_events.append(&mut other.global_memory_initialize_events);
        self.global_memory_finalize_events.append(&mut other.global_memory_finalize_events);
        self.global_page_prot_initialize_events
            .append(&mut other.global_page_prot_initialize_events);
        self.global_page_prot_finalize_events.append(&mut other.global_page_prot_finalize_events);
        self.cpu_local_memory_access.append(&mut other.cpu_local_memory_access);
        self.cpu_local_page_prot_access.append(&mut other.cpu_local_page_prot_access);
        self.global_interaction_events.append(&mut other.global_interaction_events);
    }

    /// Retrieves the public values. This method is needed for the `MachineRecord` trait, since
    /// the global cumulative sum lives behind a mutex and must be read into a copy of the
    /// public values at call time.
    fn public_values<F: AbstractField>(&self) -> Vec<F> {
        let mut public_values = self.public_values;
        public_values.global_cumulative_sum = *self.global_cumulative_sum.lock().unwrap();
        public_values.to_vec()
    }

    /// Constrains the public values.
    ///
    /// Borrows the proof's public-value slice as a structured `PublicValues`, asserts the
    /// padding (`empty`) entries are zero, and delegates to the per-concern `eval_*`
    /// helpers below.
    #[allow(clippy::type_complexity)]
    fn eval_public_values<AB: SP1AirBuilder>(builder: &mut AB) {
        let public_values_slice: [AB::PublicVar; SP1_PROOF_NUM_PV_ELTS] =
            core::array::from_fn(|i| builder.public_values()[i]);
        let public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        > = public_values_slice.as_slice().borrow();

        // Unused/padding public values must be zero.
        for var in public_values.empty {
            builder.assert_zero(var);
        }

        Self::eval_state(public_values, builder);
        Self::eval_first_execution_shard(public_values, builder);
        Self::eval_exit_code(public_values, builder);
        Self::eval_committed_value_digest(public_values, builder);
        Self::eval_deferred_proofs_digest(public_values, builder);
        Self::eval_global_sum(public_values, builder);
        Self::eval_global_memory_init(public_values, builder);
        Self::eval_global_memory_finalize(public_values, builder);
        Self::eval_global_page_prot_init(public_values, builder);
        Self::eval_global_page_prot_finalize(public_values, builder);
    }

    /// Returns the interaction kinds that participate in `eval_public_values`.
    fn interactions_in_public_values() -> Vec<InteractionKind> {
        InteractionKind::all_kinds()
            .iter()
            .filter(|kind| kind.appears_in_eval_public_values())
            .copied()
            .collect()
    }
}
876
877impl ByteRecord for ExecutionRecord {
878    fn add_byte_lookup_event(&mut self, blu_event: ByteLookupEvent) {
879        *self.byte_lookups.entry(blu_event).or_insert(0) += 1;
880    }
881
882    #[inline]
883    fn add_byte_lookup_events_from_maps(
884        &mut self,
885        new_events: Vec<&HashMap<ByteLookupEvent, usize>>,
886    ) {
887        for new_blu_map in new_events {
888            for (blu_event, count) in new_blu_map.iter() {
889                *self.byte_lookups.entry(*blu_event).or_insert(0) += count;
890            }
891        }
892    }
893}
894
895impl ExecutionRecord {
    /// Constrains the shard's boundary state: initial/last timestamps and the
    /// initial/final program counters.
    ///
    /// The timestamp is given as four limbs `[t0, t1, t2, t3]`, recombined here as
    /// `high = t1 + t0 * 2^8` and `low = t3 + t2 * 2^16`. The range checks below force
    /// `t0` and `t3` to 16 bits (with `t3 == 1 (mod 8)`) and `t1`, `t2` to bytes, so
    /// `high` and `low` each fit in 24 bits.
    #[allow(clippy::type_complexity)]
    fn eval_state<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        let initial_timestamp_high = public_values.initial_timestamp[1].into()
            + public_values.initial_timestamp[0].into() * AB::Expr::from_canonical_u32(1 << 8);
        let initial_timestamp_low = public_values.initial_timestamp[3].into()
            + public_values.initial_timestamp[2].into() * AB::Expr::from_canonical_u32(1 << 16);
        let last_timestamp_high = public_values.last_timestamp[1].into()
            + public_values.last_timestamp[0].into() * AB::Expr::from_canonical_u32(1 << 8);
        let last_timestamp_low = public_values.last_timestamp[3].into()
            + public_values.last_timestamp[2].into() * AB::Expr::from_canonical_u32(1 << 16);

        // Range check all the timestamp limbs.
        // `initial_timestamp[0]` fits in 16 bits.
        builder.send_byte(
            AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
            public_values.initial_timestamp[0].into(),
            AB::Expr::from_canonical_u32(16),
            AB::Expr::zero(),
            AB::Expr::one(),
        );
        // `(initial_timestamp[3] - 1) / 8` fits in 13 bits, i.e. `initial_timestamp[3]`
        // is a 16-bit value congruent to 1 modulo 8.
        builder.send_byte(
            AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
            (public_values.initial_timestamp[3].into() - AB::Expr::one())
                * AB::F::from_canonical_u8(8).inverse(),
            AB::Expr::from_canonical_u32(13),
            AB::Expr::zero(),
            AB::Expr::one(),
        );
        // `last_timestamp[0]` fits in 16 bits.
        builder.send_byte(
            AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
            public_values.last_timestamp[0].into(),
            AB::Expr::from_canonical_u32(16),
            AB::Expr::zero(),
            AB::Expr::one(),
        );
        // `last_timestamp[3]` is a 16-bit value congruent to 1 modulo 8, as above.
        builder.send_byte(
            AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
            (public_values.last_timestamp[3].into() - AB::Expr::one())
                * AB::F::from_canonical_u8(8).inverse(),
            AB::Expr::from_canonical_u32(13),
            AB::Expr::zero(),
            AB::Expr::one(),
        );
        // `initial_timestamp[1]` and `initial_timestamp[2]` are bytes.
        builder.send_byte(
            AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
            AB::Expr::zero(),
            public_values.initial_timestamp[1],
            public_values.initial_timestamp[2],
            AB::Expr::one(),
        );
        // `last_timestamp[1]` and `last_timestamp[2]` are bytes.
        builder.send_byte(
            AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
            AB::Expr::zero(),
            public_values.last_timestamp[1],
            public_values.last_timestamp[2],
            AB::Expr::one(),
        );

        // Range check all the initial, final program counter limbs.
        for i in 0..3 {
            builder.send_byte(
                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
                public_values.pc_start[i].into(),
                AB::Expr::from_canonical_u32(16),
                AB::Expr::zero(),
                AB::Expr::one(),
            );
            builder.send_byte(
                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
                public_values.next_pc[i].into(),
                AB::Expr::from_canonical_u32(16),
                AB::Expr::zero(),
                AB::Expr::one(),
            );
        }

        // Send and receive the initial and last state.
        builder.send_state(
            initial_timestamp_high.clone(),
            initial_timestamp_low.clone(),
            public_values.pc_start,
            AB::Expr::one(),
        );
        builder.receive_state(
            last_timestamp_high.clone(),
            last_timestamp_low.clone(),
            public_values.next_pc,
            AB::Expr::one(),
        );

        // If the shard is not execution shard, assert that timestamp and pc remains equal.
        let is_execution_shard = public_values.is_execution_shard.into();
        builder.assert_bool(is_execution_shard.clone());
        builder
            .when_not(is_execution_shard.clone())
            .assert_eq(initial_timestamp_low.clone(), last_timestamp_low.clone());
        builder
            .when_not(is_execution_shard.clone())
            .assert_eq(initial_timestamp_high.clone(), last_timestamp_high.clone());
        builder
            .when_not(is_execution_shard.clone())
            .assert_all_eq(public_values.pc_start, public_values.next_pc);

        // IsZeroOperation on the high bits of the timestamp.
        builder.assert_bool(public_values.is_timestamp_high_eq);
        // If high bits are equal, then `is_timestamp_high_eq == 1`.
        builder.assert_eq(
            (last_timestamp_high.clone() - initial_timestamp_high.clone())
                * public_values.inv_timestamp_high.into(),
            AB::Expr::one() - public_values.is_timestamp_high_eq.into(),
        );
        // If high bits are distinct, then `is_timestamp_high_eq == 0`.
        builder.assert_zero(
            (last_timestamp_high.clone() - initial_timestamp_high.clone())
                * public_values.is_timestamp_high_eq.into(),
        );

        // IsZeroOperation on the low bits of the timestamp.
        builder.assert_bool(public_values.is_timestamp_low_eq);
        // If low bits are equal, then `is_timestamp_low_eq == 1`.
        builder.assert_eq(
            (last_timestamp_low.clone() - initial_timestamp_low.clone())
                * public_values.inv_timestamp_low.into(),
            AB::Expr::one() - public_values.is_timestamp_low_eq.into(),
        );
        // If low bits are distinct, then `is_timestamp_low_eq == 0`.
        builder.assert_zero(
            (last_timestamp_low.clone() - initial_timestamp_low.clone())
                * public_values.is_timestamp_low_eq.into(),
        );

        // If the shard is an execution shard, then the timestamp is different.
        builder.assert_eq(
            AB::Expr::one() - is_execution_shard.clone(),
            public_values.is_timestamp_high_eq.into() * public_values.is_timestamp_low_eq.into(),
        );

        // Check that an execution shard has `last_timestamp != 1` by providing an inverse.
        // The `high + low` value cannot overflow, as they were range checked to be 24 bits.
        // `high == 1, low == 0` is impossible, as `low == 1 (mod 8)` as checked in `eval_state`.
        builder.when(is_execution_shard.clone()).assert_eq(
            (last_timestamp_high + last_timestamp_low - AB::Expr::one())
                * public_values.last_timestamp_inv.into(),
            AB::Expr::one(),
        );
    }
1049
    /// Constrains the first-execution-shard flag and its boundary conditions: the shard
    /// with `is_first_execution_shard == 1` must start at timestamp 1, be an execution
    /// shard, and have all of its "previous" public values zeroed.
    #[allow(clippy::type_complexity)]
    fn eval_first_execution_shard<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        // Check that `is_first_execution_shard` is boolean.
        builder.assert_bool(public_values.is_first_execution_shard.into());

        // Timestamp constraints.
        //
        // We want to assert that `is_first_execution_shard == 1` corresponds exactly to the unique
        // execution shard with initial timestamp 1. We are assuming that there is a unique
        // shard with `is_first_execution_shard == 1`. This is enforced in the verifier and
        // in recursion. Given thus, it is enough to impose that for this unique shard,
        // `initial_timestamp == 1`.
        builder.when(public_values.is_first_execution_shard.into()).assert_all_eq(
            public_values.initial_timestamp,
            [AB::Expr::zero(), AB::Expr::zero(), AB::Expr::zero(), AB::Expr::one()],
        );

        // If `is_first_execution_shard` is true, check `is_execution_shard == 1`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_one(public_values.is_execution_shard);

        // If `is_first_execution_shard` is true, assert the initial boundary conditions.

        // Check `prev_committed_value_digest == 0`.
        for i in 0..PV_DIGEST_NUM_WORDS {
            builder
                .when(public_values.is_first_execution_shard.into())
                .assert_all_zero(public_values.prev_committed_value_digest[i]);
        }

        // Check `prev_deferred_proofs_digest == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_all_zero(public_values.prev_deferred_proofs_digest);

        // Check `prev_exit_code == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_zero(public_values.prev_exit_code);

        // Check `previous_init_addr == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_all_zero(public_values.previous_init_addr);

        // Check `previous_finalize_addr == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_all_zero(public_values.previous_finalize_addr);

        // Check `previous_init_page_idx == 0`
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_all_zero(public_values.previous_init_page_idx);

        // Check `previous_finalize_page_idx == 0`
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_all_zero(public_values.previous_finalize_page_idx);

        // Check `prev_commit_syscall == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_zero(public_values.prev_commit_syscall);

        // Check `prev_commit_deferred_syscall == 0`.
        builder
            .when(public_values.is_first_execution_shard.into())
            .assert_zero(public_values.prev_commit_deferred_syscall);
    }
1129
    /// Constrains the exit code transition across the shard: once `prev_exit_code` is
    /// non-zero it is sticky (`exit_code` must equal it), and a non-execution shard
    /// cannot change the exit code at all.
    #[allow(clippy::type_complexity)]
    fn eval_exit_code<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        let is_execution_shard = public_values.is_execution_shard.into();

        // If the `prev_exit_code` is non-zero, then the `exit_code` must be equal to it.
        builder.assert_zero(
            public_values.prev_exit_code.into()
                * (public_values.exit_code.into() - public_values.prev_exit_code.into()),
        );

        // If it's not an execution shard, assert that `exit_code` will not change in that shard.
        builder
            .when_not(is_execution_shard.clone())
            .assert_eq(public_values.prev_exit_code, public_values.exit_code);
    }
1153
    /// Constrains the `committed_value_digest` transition across the shard.
    ///
    /// All digest limbs (previous and current) are byte range-checked; `commit_syscall`
    /// is a sticky boolean; and the digest is frozen whenever the shard is not an
    /// execution shard, the previous digest is already non-zero, or the commit syscall
    /// has already happened.
    #[allow(clippy::type_complexity)]
    fn eval_committed_value_digest<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        let is_execution_shard = public_values.is_execution_shard.into();

        // Assert that both `prev_committed_value_digest` and `committed_value_digest` are bytes.
        // Each word has 4 limbs, checked in pairs via the U8Range byte lookup.
        for i in 0..PV_DIGEST_NUM_WORDS {
            builder.send_byte(
                AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
                AB::Expr::zero(),
                public_values.prev_committed_value_digest[i][0],
                public_values.prev_committed_value_digest[i][1],
                AB::Expr::one(),
            );
            builder.send_byte(
                AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
                AB::Expr::zero(),
                public_values.prev_committed_value_digest[i][2],
                public_values.prev_committed_value_digest[i][3],
                AB::Expr::one(),
            );
            builder.send_byte(
                AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
                AB::Expr::zero(),
                public_values.committed_value_digest[i][0],
                public_values.committed_value_digest[i][1],
                AB::Expr::one(),
            );
            builder.send_byte(
                AB::Expr::from_canonical_u8(ByteOpcode::U8Range as u8),
                AB::Expr::zero(),
                public_values.committed_value_digest[i][2],
                public_values.committed_value_digest[i][3],
                AB::Expr::one(),
            );
        }

        // Assert that both `prev_commit_syscall` and `commit_syscall` are boolean.
        builder.assert_bool(public_values.prev_commit_syscall);
        builder.assert_bool(public_values.commit_syscall);

        // Assert that `prev_commit_syscall == 1` implies `commit_syscall == 1`.
        builder.when(public_values.prev_commit_syscall).assert_one(public_values.commit_syscall);

        // Assert that the `commit_syscall` value doesn't change in a non-execution shard.
        builder
            .when_not(is_execution_shard.clone())
            .assert_eq(public_values.prev_commit_syscall, public_values.commit_syscall);

        // Assert that `committed_value_digest` will not change in a non-execution shard.
        for i in 0..PV_DIGEST_NUM_WORDS {
            builder.when_not(is_execution_shard.clone()).assert_all_eq(
                public_values.prev_committed_value_digest[i],
                public_values.committed_value_digest[i],
            );
        }

        // Assert that `prev_committed_value_digest != [0u8; 32]` implies `committed_value_digest`
        // must remain equal to the `prev_committed_value_digest`.
        // Each limb acts as a condition: if any limb is non-zero, every word is pinned.
        for word in public_values.prev_committed_value_digest {
            for limb in word {
                for i in 0..PV_DIGEST_NUM_WORDS {
                    builder.when(limb).assert_all_eq(
                        public_values.prev_committed_value_digest[i],
                        public_values.committed_value_digest[i],
                    );
                }
            }
        }

        // Assert that if `prev_commit_syscall` is true, `committed_value_digest` doesn't change.
        for i in 0..PV_DIGEST_NUM_WORDS {
            builder.when(public_values.prev_commit_syscall).assert_all_eq(
                public_values.prev_committed_value_digest[i],
                public_values.committed_value_digest[i],
            );
        }
    }
1239
    /// Constrains the `deferred_proofs_digest` transition across the shard, mirroring
    /// [`Self::eval_committed_value_digest`]: `commit_deferred_syscall` is a sticky
    /// boolean, and the digest is frozen in non-execution shards, once it is non-zero,
    /// or once the commit-deferred syscall has happened.
    #[allow(clippy::type_complexity)]
    fn eval_deferred_proofs_digest<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        let is_execution_shard = public_values.is_execution_shard.into();

        // Assert that `prev_commit_deferred_syscall` and `commit_deferred_syscall` are boolean.
        builder.assert_bool(public_values.prev_commit_deferred_syscall);
        builder.assert_bool(public_values.commit_deferred_syscall);

        // Assert that `prev_commit_deferred_syscall == 1` implies `commit_deferred_syscall == 1`.
        builder
            .when(public_values.prev_commit_deferred_syscall)
            .assert_one(public_values.commit_deferred_syscall);

        // Assert that the `commit_deferred_syscall` value doesn't change in a non-execution shard.
        builder.when_not(is_execution_shard.clone()).assert_eq(
            public_values.prev_commit_deferred_syscall,
            public_values.commit_deferred_syscall,
        );

        // Assert that `deferred_proofs_digest` will not change in a non-execution shard.
        builder.when_not(is_execution_shard.clone()).assert_all_eq(
            public_values.prev_deferred_proofs_digest,
            public_values.deferred_proofs_digest,
        );

        // Assert that `prev_deferred_proofs_digest != 0` implies `deferred_proofs_digest` must
        // remain equal to the `prev_deferred_proofs_digest`.
        for limb in public_values.prev_deferred_proofs_digest {
            builder.when(limb).assert_all_eq(
                public_values.prev_deferred_proofs_digest,
                public_values.deferred_proofs_digest,
            );
        }

        // If `prev_commit_deferred_syscall` is true, `deferred_proofs_digest` doesn't change.
        builder.when(public_values.prev_commit_deferred_syscall).assert_all_eq(
            public_values.prev_deferred_proofs_digest,
            public_values.deferred_proofs_digest,
        );
    }
1288
    /// Constrains the global cumulative sum accumulation chain.
    ///
    /// Sends `(0, zero septic digest)` and receives
    /// `(global_count, global_cumulative_sum)` on the `GlobalAccumulation` interaction
    /// (local scope), pinning the start and end of the accumulation chain for the shard.
    #[allow(clippy::type_complexity)]
    fn eval_global_sum<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        // Chain start: index 0 with the zero digest (its x/y septic coordinates flattened).
        let initial_sum = SepticDigest::<AB::F>::zero().0;
        builder.send(
            AirInteraction::new(
                once(AB::Expr::zero())
                    .chain(initial_sum.x.0.into_iter().map(Into::into))
                    .chain(initial_sum.y.0.into_iter().map(Into::into))
                    .collect(),
                AB::Expr::one(),
                InteractionKind::GlobalAccumulation,
            ),
            InteractionScope::Local,
        );
        // Chain end: index `global_count` with the claimed cumulative sum.
        builder.receive(
            AirInteraction::new(
                once(public_values.global_count.into())
                    .chain(public_values.global_cumulative_sum.0.x.0.map(Into::into))
                    .chain(public_values.global_cumulative_sum.0.y.0.map(Into::into))
                    .collect(),
                AB::Expr::one(),
                InteractionKind::GlobalAccumulation,
            ),
            InteractionScope::Local,
        );
    }
1323
    /// Constrains the global memory initialization address chain for the shard.
    ///
    /// The `previous_init_addr` / `last_init_addr` limbs are range-checked to 16 bits,
    /// then a send/receive pair on `MemoryGlobalInitControl` pins the chain start
    /// `(0, previous_init_addr)` and end `(global_init_count, last_init_addr)`.
    #[allow(clippy::type_complexity)]
    fn eval_global_memory_init<AB: SP1AirBuilder>(
        public_values: &PublicValues<
            [AB::PublicVar; 4],
            [AB::PublicVar; 3],
            [AB::PublicVar; 4],
            AB::PublicVar,
        >,
        builder: &mut AB,
    ) {
        // Check the addresses are of valid u16 limbs.
        for i in 0..3 {
            builder.send_byte(
                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
                public_values.previous_init_addr[i].into(),
                AB::Expr::from_canonical_u32(16),
                AB::Expr::zero(),
                AB::Expr::one(),
            );
            builder.send_byte(
                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
                public_values.last_init_addr[i].into(),
                AB::Expr::from_canonical_u32(16),
                AB::Expr::zero(),
                AB::Expr::one(),
            );
        }

        // Chain start: count 0 at `previous_init_addr`.
        builder.send(
            AirInteraction::new(
                once(AB::Expr::zero())
                    .chain(public_values.previous_init_addr.into_iter().map(Into::into))
                    .chain(once(AB::Expr::one()))
                    .collect(),
                AB::Expr::one(),
                InteractionKind::MemoryGlobalInitControl,
            ),
            InteractionScope::Local,
        );
        // Chain end: count `global_init_count` at `last_init_addr`.
        builder.receive(
            AirInteraction::new(
                once(public_values.global_init_count.into())
                    .chain(public_values.last_init_addr.into_iter().map(Into::into))
                    .chain(once(AB::Expr::one()))
                    .collect(),
                AB::Expr::one(),
                InteractionKind::MemoryGlobalInitControl,
            ),
            InteractionScope::Local,
        );
    }
1375
1376    #[allow(clippy::type_complexity)]
1377    fn eval_global_memory_finalize<AB: SP1AirBuilder>(
1378        public_values: &PublicValues<
1379            [AB::PublicVar; 4],
1380            [AB::PublicVar; 3],
1381            [AB::PublicVar; 4],
1382            AB::PublicVar,
1383        >,
1384        builder: &mut AB,
1385    ) {
1386        // Check the addresses are of valid u16 limbs.
1387        for i in 0..3 {
1388            builder.send_byte(
1389                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
1390                public_values.previous_finalize_addr[i].into(),
1391                AB::Expr::from_canonical_u32(16),
1392                AB::Expr::zero(),
1393                AB::Expr::one(),
1394            );
1395            builder.send_byte(
1396                AB::Expr::from_canonical_u32(ByteOpcode::Range as u32),
1397                public_values.last_finalize_addr[i].into(),
1398                AB::Expr::from_canonical_u32(16),
1399                AB::Expr::zero(),
1400                AB::Expr::one(),
1401            );
1402        }
1403
1404        builder.send(
1405            AirInteraction::new(
1406                once(AB::Expr::zero())
1407                    .chain(public_values.previous_finalize_addr.into_iter().map(Into::into))
1408                    .chain(once(AB::Expr::one()))
1409                    .collect(),
1410                AB::Expr::one(),
1411                InteractionKind::MemoryGlobalFinalizeControl,
1412            ),
1413            InteractionScope::Local,
1414        );
1415        builder.receive(
1416            AirInteraction::new(
1417                once(public_values.global_finalize_count.into())
1418                    .chain(public_values.last_finalize_addr.into_iter().map(Into::into))
1419                    .chain(once(AB::Expr::one()))
1420                    .collect(),
1421                AB::Expr::one(),
1422                InteractionKind::MemoryGlobalFinalizeControl,
1423            ),
1424            InteractionScope::Local,
1425        );
1426    }
1427
1428    #[allow(clippy::type_complexity)]
1429    fn eval_global_page_prot_init<AB: SP1AirBuilder>(
1430        public_values: &PublicValues<
1431            [AB::PublicVar; 4],
1432            [AB::PublicVar; 3],
1433            [AB::PublicVar; 4],
1434            AB::PublicVar,
1435        >,
1436        builder: &mut AB,
1437    ) {
1438        builder.assert_bool(public_values.is_untrusted_programs_enabled.into());
1439        builder.send(
1440            AirInteraction::new(
1441                once(AB::Expr::zero())
1442                    .chain(public_values.previous_init_page_idx.into_iter().map(Into::into))
1443                    .chain(once(AB::Expr::one()))
1444                    .collect(),
1445                public_values.is_untrusted_programs_enabled.into(),
1446                InteractionKind::PageProtGlobalInitControl,
1447            ),
1448            InteractionScope::Local,
1449        );
1450        builder.receive(
1451            AirInteraction::new(
1452                once(public_values.global_page_prot_init_count.into())
1453                    .chain(public_values.last_init_page_idx.into_iter().map(Into::into))
1454                    .chain(once(AB::Expr::one()))
1455                    .collect(),
1456                public_values.is_untrusted_programs_enabled.into(),
1457                InteractionKind::PageProtGlobalInitControl,
1458            ),
1459            InteractionScope::Local,
1460        );
1461    }
1462
1463    #[allow(clippy::type_complexity)]
1464    fn eval_global_page_prot_finalize<AB: SP1AirBuilder>(
1465        public_values: &PublicValues<
1466            [AB::PublicVar; 4],
1467            [AB::PublicVar; 3],
1468            [AB::PublicVar; 4],
1469            AB::PublicVar,
1470        >,
1471        builder: &mut AB,
1472    ) {
1473        builder.assert_bool(public_values.is_untrusted_programs_enabled.into());
1474        builder.send(
1475            AirInteraction::new(
1476                once(AB::Expr::zero())
1477                    .chain(public_values.previous_finalize_page_idx.into_iter().map(Into::into))
1478                    .chain(once(AB::Expr::one()))
1479                    .collect(),
1480                public_values.is_untrusted_programs_enabled.into(),
1481                InteractionKind::PageProtGlobalFinalizeControl,
1482            ),
1483            InteractionScope::Local,
1484        );
1485        builder.receive(
1486            AirInteraction::new(
1487                once(public_values.global_page_prot_finalize_count.into())
1488                    .chain(public_values.last_finalize_page_idx.into_iter().map(Into::into))
1489                    .chain(once(AB::Expr::one()))
1490                    .collect(),
1491                public_values.is_untrusted_programs_enabled.into(),
1492                InteractionKind::PageProtGlobalFinalizeControl,
1493            ),
1494            InteractionScope::Local,
1495        );
1496    }
1497
1498    /// Finalize the public values.
1499    pub fn finalize_public_values<F: PrimeField32>(&mut self, is_execution_shard: bool) {
1500        let state = &mut self.public_values;
1501        state.is_execution_shard = is_execution_shard as u32;
1502
1503        let initial_timestamp_high = (state.initial_timestamp >> 24) as u32;
1504        let initial_timestamp_low = (state.initial_timestamp & 0xFFFFFF) as u32;
1505        let last_timestamp_high = (state.last_timestamp >> 24) as u32;
1506        let last_timestamp_low = (state.last_timestamp & 0xFFFFFF) as u32;
1507
1508        state.initial_timestamp_inv = if state.initial_timestamp == 1 {
1509            0
1510        } else {
1511            F::from_canonical_u32(initial_timestamp_high + initial_timestamp_low - 1)
1512                .inverse()
1513                .as_canonical_u32()
1514        };
1515
1516        state.last_timestamp_inv =
1517            F::from_canonical_u32(last_timestamp_high + last_timestamp_low - 1)
1518                .inverse()
1519                .as_canonical_u32();
1520
1521        if initial_timestamp_high == last_timestamp_high {
1522            state.is_timestamp_high_eq = 1;
1523        } else {
1524            state.is_timestamp_high_eq = 0;
1525            state.inv_timestamp_high = (F::from_canonical_u32(last_timestamp_high)
1526                - F::from_canonical_u32(initial_timestamp_high))
1527            .inverse()
1528            .as_canonical_u32();
1529        }
1530
1531        if initial_timestamp_low == last_timestamp_low {
1532            state.is_timestamp_low_eq = 1;
1533        } else {
1534            state.is_timestamp_low_eq = 0;
1535            state.inv_timestamp_low = (F::from_canonical_u32(last_timestamp_low)
1536                - F::from_canonical_u32(initial_timestamp_low))
1537            .inverse()
1538            .as_canonical_u32();
1539        }
1540        state.is_first_execution_shard = (state.initial_timestamp == 1) as u32;
1541    }
1542}