sp1_recursion_program/machine/core.rs

use std::{
    array,
    borrow::{Borrow, BorrowMut},
    marker::PhantomData,
};

use itertools::Itertools;
use p3_baby_bear::BabyBear;
use p3_commit::TwoAdicMultiplicativeCoset;
use p3_field::{AbstractField, PrimeField32, TwoAdicField};
use sp1_core_machine::{cpu::MAX_CPU_LOG_DEGREE, riscv::RiscvAir};
use sp1_primitives::{consts::WORD_SIZE, types::RecursionProgramType};
use sp1_recursion_compiler::{
    config::InnerConfig,
    ir::{Array, Builder, Config, Ext, ExtConst, Felt, Var},
    prelude::{DslVariable, *},
};
use sp1_recursion_core::{
    air::{RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS},
    runtime::{RecursionProgram, DIGEST_SIZE},
};
use sp1_stark::{
    air::{MachineAir, PublicValues, POSEIDON_NUM_WORDS, PV_DIGEST_NUM_WORDS},
    baby_bear_poseidon2::BabyBearPoseidon2,
    Com, ShardProof, StarkGenericConfig, StarkMachine, StarkVerifyingKey, Word,
};

use crate::{
    challenger::{CanObserveVariable, DuplexChallengerVariable},
    fri::TwoAdicFriPcsVariable,
    hints::Hintable,
    stark::{StarkVerifier, EMPTY},
    types::{ShardProofVariable, VerifyingKeyVariable},
    utils::{const_fri_config, felt2var, get_challenger_public_values, hash_vkey, var2felt},
};

use super::utils::{assert_complete, commit_public_values};

/// A program for recursively verifying a batch of SP1 proofs.
#[derive(Debug, Clone, Copy)]
pub struct SP1RecursiveVerifier<C: Config, SC: StarkGenericConfig> {
    _phantom: PhantomData<(C, SC)>,
}

pub struct SP1RecursionMemoryLayout<'a, SC: StarkGenericConfig, A: MachineAir<SC::Val>> {
    pub vk: &'a StarkVerifyingKey<SC>,
    pub machine: &'a StarkMachine<SC, A>,
    pub shard_proofs: Vec<ShardProof<SC>>,
    pub leaf_challenger: &'a SC::Challenger,
    pub initial_reconstruct_challenger: SC::Challenger,
    pub is_complete: bool,
}

#[derive(DslVariable, Clone)]
pub struct SP1RecursionMemoryLayoutVariable<C: Config> {
    pub vk: VerifyingKeyVariable<C>,
    pub shard_proofs: Array<C, ShardProofVariable<C>>,
    pub leaf_challenger: DuplexChallengerVariable<C>,
    pub initial_reconstruct_challenger: DuplexChallengerVariable<C>,
    pub is_complete: Var<C::N>,
}

impl SP1RecursiveVerifier<InnerConfig, BabyBearPoseidon2> {
    /// Create a new instance of the program for the [BabyBearPoseidon2] config.
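    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled here); it assumes the `RiscvAir::machine` constructor
    /// and `BabyBearPoseidon2::new()` from the SP1 crates, so adjust to the constructors in your
    /// version of the codebase.
    ///
    /// ```rust,ignore
    /// use p3_baby_bear::BabyBear;
    /// use sp1_core_machine::riscv::RiscvAir;
    /// use sp1_recursion_compiler::config::InnerConfig;
    /// use sp1_stark::baby_bear_poseidon2::BabyBearPoseidon2;
    ///
    /// // Build the RISC-V machine for the BabyBearPoseidon2 config and compile the recursive
    /// // verifier program (a `RecursionProgram<BabyBear>`) for it.
    /// let machine = RiscvAir::<BabyBear>::machine(BabyBearPoseidon2::new());
    /// let program = SP1RecursiveVerifier::<InnerConfig, BabyBearPoseidon2>::build(&machine);
    /// ```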
    pub fn build(
        machine: &StarkMachine<BabyBearPoseidon2, RiscvAir<BabyBear>>,
    ) -> RecursionProgram<BabyBear> {
        let mut builder = Builder::<InnerConfig>::new(RecursionProgramType::Core);

        let input: SP1RecursionMemoryLayoutVariable<_> = builder.uninit();
        SP1RecursionMemoryLayout::<BabyBearPoseidon2, RiscvAir<_>>::witness(&input, &mut builder);

        let pcs = TwoAdicFriPcsVariable {
            config: const_fri_config(&mut builder, machine.config().pcs().fri_config()),
        };
        SP1RecursiveVerifier::verify(&mut builder, &pcs, machine, input);

        builder.halt();

        builder.compile_program()
    }
}

impl<C: Config, SC: StarkGenericConfig> SP1RecursiveVerifier<C, SC>
where
    C::F: PrimeField32 + TwoAdicField,
    SC: StarkGenericConfig<
        Val = C::F,
        Challenge = C::EF,
        Domain = TwoAdicMultiplicativeCoset<C::F>,
    >,
    Com<SC>: Into<[SC::Val; DIGEST_SIZE]>,
{
    /// Verify a batch of SP1 shard proofs and aggregate their public values.
    ///
    /// This program represents a first recursive step in the verification of an SP1 proof
    /// consisting of one or more shards. Each shard proof is verified and its public values are
    /// aggregated into a single set representing the start and end state of the program execution
    /// across all shards.
    ///
    /// # Constraints
    ///
    /// ## Verifying the STARK proofs.
    /// For each shard, the verifier asserts the correctness of the STARK proof, which consists of
    /// verifying the FRI proof for the openings and verifying the constraints.
    ///
    /// ## Aggregating the shard public values.
    /// See [SP1Prover::verify] for the verification algorithm of a complete SP1 proof. In this
    /// function, we aggregate several shard proofs and attest to an aggregated state that
    /// represents all the shards.
    ///
    /// ## The leaf challenger.
    /// A key difference between the recursive tree verification and the complete one in
    /// [SP1Prover::verify] is that the recursive verifier has no way of reconstructing the
    /// challenger from only a subset of the shard proofs. Therefore, the value of the leaf
    /// challenger is witnessed in the program and the verifier asserts correctness given this
    /// challenger. In the course of the recursive verification, the challenger is reconstructed
    /// by observing the commitments one by one, and in the final step, it is asserted to be the
    /// same as the one witnessed here.
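    ///
    /// # Transition sketch
    ///
    /// For illustration only: the in-circuit checks below roughly correspond to the following
    /// native-Rust invariant over the shards' public values. The `Pv` struct here is a
    /// hypothetical simplification; the real checks operate on `Felt`/`Var` variables through the
    /// DSL builder and also cover execution shards, memory address bits, digests, and the exit
    /// code.
    ///
    /// ```rust,ignore
    /// struct Pv {
    ///     shard: u32,
    ///     start_pc: u32,
    ///     next_pc: u32,
    /// }
    ///
    /// fn check_transitions(pvs: &[Pv], pc_start: u32) {
    ///     assert!(!pvs.is_empty());
    ///     let mut expected_shard = pvs[0].shard;
    ///     let mut expected_pc = pvs[0].start_pc;
    ///     for pv in pvs {
    ///         // Shard numbers are consecutive and the program counter chains across shards.
    ///         assert_eq!(pv.shard, expected_shard);
    ///         assert_eq!(pv.start_pc, expected_pc);
    ///         // The very first shard must start at the program's entry point.
    ///         if pv.shard == 1 {
    ///             assert_eq!(pv.start_pc, pc_start);
    ///         }
    ///         expected_shard += 1;
    ///         expected_pc = pv.next_pc;
    ///     }
    /// }
    /// ```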
    pub fn verify(
        builder: &mut Builder<C>,
        pcs: &TwoAdicFriPcsVariable<C>,
        machine: &StarkMachine<SC, RiscvAir<SC::Val>>,
        input: SP1RecursionMemoryLayoutVariable<C>,
    ) {
        // Read input.
        let SP1RecursionMemoryLayoutVariable {
            vk,
            shard_proofs,
            leaf_challenger,
            initial_reconstruct_challenger,
            is_complete,
        } = input;

        // Initialize shard variables.
        let initial_shard = builder.uninit();
        let current_shard = builder.uninit();

        // Initialize execution shard variables.
        let initial_execution_shard = builder.uninit();
        let current_execution_shard = builder.uninit();

        // Initialize program counter variables.
        let start_pc = builder.uninit();
        let current_pc = builder.uninit();

        // Initialize memory initialization and finalization variables.
        let initial_previous_init_addr_bits: [Felt<_>; 32] = array::from_fn(|_| builder.uninit());
        let initial_previous_finalize_addr_bits: [Felt<_>; 32] =
            array::from_fn(|_| builder.uninit());
        let current_init_addr_bits: [Felt<_>; 32] = array::from_fn(|_| builder.uninit());
        let current_finalize_addr_bits: [Felt<_>; 32] = array::from_fn(|_| builder.uninit());

        // Initialize the exit code variable.
        let exit_code: Felt<_> = builder.uninit();

        // Initialize the public values digest.
        let committed_value_digest: [Word<Felt<_>>; PV_DIGEST_NUM_WORDS] =
            array::from_fn(|_| Word(array::from_fn(|_| builder.uninit())));

        // Initialize the deferred proofs digest.
        let deferred_proofs_digest: [Felt<_>; POSEIDON_NUM_WORDS] =
            array::from_fn(|_| builder.uninit());

        // Initialize the challenger variables.
        let leaf_challenger_public_values = get_challenger_public_values(builder, &leaf_challenger);
        let mut reconstruct_challenger: DuplexChallengerVariable<_> =
            initial_reconstruct_challenger.copy(builder);

        // Initialize the cumulative sum.
        let cumulative_sum: Ext<_, _> = builder.eval(C::EF::zero().cons());

        // Assert that the number of proofs is not zero.
        builder.assert_usize_ne(shard_proofs.len(), 0);

        // Verify proofs, validate transitions, and update accumulation variables.
        builder.range(0, shard_proofs.len()).for_each(|i, builder| {
            // Load the proof.
            let proof = builder.get(&shard_proofs, i);

            // Compute some flags about which chips exist in the shard.
            let contains_cpu: Var<_> = builder.eval(C::N::zero());
            let contains_memory_init: Var<_> = builder.eval(C::N::zero());
            let contains_memory_finalize: Var<_> = builder.eval(C::N::zero());
            for (i, chip) in machine.chips().iter().enumerate() {
                let index = builder.get(&proof.sorted_idxs, i);
                if chip.name() == "CPU" {
                    builder.if_ne(index, C::N::from_canonical_usize(EMPTY)).then(|builder| {
                        builder.assign(contains_cpu, C::N::one());
                    });
                } else if chip.name() == "MemoryInit" {
                    builder.if_ne(index, C::N::from_canonical_usize(EMPTY)).then(|builder| {
                        builder.assign(contains_memory_init, C::N::one());
                    });
                } else if chip.name() == "MemoryFinalize" {
                    builder.if_ne(index, C::N::from_canonical_usize(EMPTY)).then(|builder| {
                        builder.assign(contains_memory_finalize, C::N::one());
                    });
                }
            }

            // Extract public values.
            let mut pv_elements = Vec::new();
            for i in 0..machine.num_pv_elts() {
                let element = builder.get(&proof.public_values, i);
                pv_elements.push(element);
            }
            let public_values: &PublicValues<Word<Felt<_>>, Felt<_>> =
                pv_elements.as_slice().borrow();

            // If this is the first proof in the batch, initialize the variables.
            builder.if_eq(i, C::N::zero()).then(|builder| {
                // Shard.
                builder.assign(initial_shard, public_values.shard);
                builder.assign(current_shard, public_values.shard);

                // Execution shard.
                builder.assign(initial_execution_shard, public_values.execution_shard);
                builder.assign(current_execution_shard, public_values.execution_shard);

                // Program counter.
                builder.assign(start_pc, public_values.start_pc);
                builder.assign(current_pc, public_values.start_pc);

                // Memory initialization & finalization.
                for ((bit, pub_bit), first_bit) in current_init_addr_bits
                    .iter()
                    .zip(public_values.previous_init_addr_bits.iter())
                    .zip(initial_previous_init_addr_bits.iter())
                {
                    builder.assign(*bit, *pub_bit);
                    builder.assign(*first_bit, *pub_bit);
                }
                for ((bit, pub_bit), first_bit) in current_finalize_addr_bits
                    .iter()
                    .zip(public_values.previous_finalize_addr_bits.iter())
                    .zip(initial_previous_finalize_addr_bits.iter())
                {
                    builder.assign(*bit, *pub_bit);
                    builder.assign(*first_bit, *pub_bit);
                }

                // Exit code.
                builder.assign(exit_code, public_values.exit_code);

                // Committed public values digest.
                for (word, first_word) in committed_value_digest
                    .iter()
                    .zip_eq(public_values.committed_value_digest.iter())
                {
                    for (byte, first_byte) in word.0.iter().zip_eq(first_word.0.iter()) {
                        builder.assign(*byte, *first_byte);
                    }
                }

                // Deferred proofs digest.
                for (digest, first_digest) in deferred_proofs_digest
                    .iter()
                    .zip_eq(public_values.deferred_proofs_digest.iter())
                {
                    builder.assign(*digest, *first_digest);
                }
            });

            // If the shard is the first shard, assert that the initial challenger is equal to a
            // fresh challenger observing the verifier key and the initial pc.
            let shard = felt2var(builder, public_values.shard);
            builder.if_eq(shard, C::N::one()).then(|builder| {
                let mut first_initial_challenger = DuplexChallengerVariable::new(builder);
                first_initial_challenger.observe(builder, vk.commitment.clone());
                first_initial_challenger.observe(builder, vk.pc_start);
                initial_reconstruct_challenger.assert_eq(builder, &first_initial_challenger);
            });

            // Verify the shard.
            //
            // Do not verify the cumulative sum here, since the permutation challenge is shared
            // between all shards.
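            //
            // Instead, the per-shard cumulative sums are accumulated at the end of this loop body
            // and the aggregate is exposed in the recursion public values below.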
            let mut challenger = leaf_challenger.copy(builder);
            StarkVerifier::<C, SC>::verify_shard(
                builder,
                &vk,
                pcs,
                machine,
                &mut challenger,
                &proof,
                false,
            );

            // Assert that the first shard contains the "CPU" chip.
            {
                builder.if_eq(shard, C::N::one()).then(|builder| {
                    builder.assert_var_eq(contains_cpu, C::N::one());
                });
            }

            // CPU log degree bound check constraints.
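            //
            // Rather than a native less-than comparison, the bound
            // `cpu_log_degree <= MAX_CPU_LOG_DEGREE` is enforced by checking the opened log degree
            // for equality against every value in `0..=MAX_CPU_LOG_DEGREE` and asserting that a
            // match was found.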
            {
                for (i, chip) in machine.chips().iter().enumerate() {
                    if chip.name() == "CPU" {
                        builder.if_eq(contains_cpu, C::N::one()).then(|builder| {
                            let index = builder.get(&proof.sorted_idxs, i);
                            let cpu_log_degree =
                                builder.get(&proof.opened_values.chips, index).log_degree;
                            let cpu_log_degree_lt_max: Var<_> = builder.eval(C::N::zero());
                            builder.range(0, MAX_CPU_LOG_DEGREE + 1).for_each(|j, builder| {
                                builder.if_eq(j, cpu_log_degree).then(|builder| {
                                    builder.assign(cpu_log_degree_lt_max, C::N::one());
                                });
                            });
                            builder.assert_var_eq(cpu_log_degree_lt_max, C::N::one());
                        });
                    }
                }
            }

            // Shard constraints.
            {
                // Assert that the shard of the proof is equal to the current shard.
                builder.assert_felt_eq(current_shard, public_values.shard);

                // Increment the current shard by one.
                builder.assign(current_shard, current_shard + C::F::one());
            }

            // Execution shard constraints.
            let execution_shard = felt2var(builder, public_values.execution_shard);
            {
                // If the shard contains the "CPU" chip, assert that its execution shard is equal
                // to the current execution shard.
                builder.if_eq(contains_cpu, C::N::one()).then(|builder| {
                    builder.assert_felt_eq(current_execution_shard, public_values.execution_shard);
                });

                // If the shard has a "CPU" chip, then the execution shard should be incremented by
                // 1.
                builder.if_eq(contains_cpu, C::N::one()).then(|builder| {
                    builder.assign(current_execution_shard, current_execution_shard + C::F::one());
                });
            }

            // Program counter constraints.
            {
                // If it's the first shard (which is the first execution shard), then the start_pc
                // should be vk.pc_start.
                builder.if_eq(shard, C::N::one()).then(|builder| {
                    builder.assert_felt_eq(public_values.start_pc, vk.pc_start);
                });

                // Assert that the start_pc of the proof is equal to the current pc.
                builder.assert_felt_eq(current_pc, public_values.start_pc);

                // If it's not a shard with "CPU", then assert that the start_pc equals the next_pc.
                builder.if_ne(contains_cpu, C::N::one()).then(|builder| {
                    builder.assert_felt_eq(public_values.start_pc, public_values.next_pc);
                });

                // If it's a shard with "CPU", then assert that the start_pc is not zero.
                builder.if_eq(contains_cpu, C::N::one()).then(|builder| {
                    builder.assert_felt_ne(public_values.start_pc, C::F::zero());
                });

                // Update current_pc to be the end_pc of the current proof.
                builder.assign(current_pc, public_values.next_pc);
            }

            // Exit code constraints.
            {
                // Assert that the exit code is zero (success) for all proofs.
                builder.assert_felt_eq(exit_code, C::F::zero());
            }

            // Memory initialization & finalization constraints.
            {
                // Assert that `init_addr_bits` and `finalize_addr_bits` are zero for the first
                // execution shard.
                builder.if_eq(execution_shard, C::N::one()).then(|builder| {
                    // Assert that the MemoryInitialize address bits are zero.
                    for bit in current_init_addr_bits.iter() {
                        builder.assert_felt_eq(*bit, C::F::zero());
                    }

                    // Assert that the MemoryFinalize address bits are zero.
                    for bit in current_finalize_addr_bits.iter() {
                        builder.assert_felt_eq(*bit, C::F::zero());
                    }
                });

                // Assert that the proof's previous MemoryInitialize address bits match the current
                // address bits.
                for (bit, current_bit) in current_init_addr_bits
                    .iter()
                    .zip_eq(public_values.previous_init_addr_bits.iter())
                {
                    builder.assert_felt_eq(*bit, *current_bit);
                }

                // Assert that the proof's previous MemoryFinalize address bits match the current
                // address bits.
                for (bit, current_bit) in current_finalize_addr_bits
                    .iter()
                    .zip_eq(public_values.previous_finalize_addr_bits.iter())
                {
                    builder.assert_felt_eq(*bit, *current_bit);
                }

                // Assert that if MemoryInit is not present, then the address bits are the same.
                builder.if_ne(contains_memory_init, C::N::one()).then(|builder| {
                    for (prev_bit, last_bit) in public_values
                        .previous_init_addr_bits
                        .iter()
                        .zip_eq(public_values.last_init_addr_bits.iter())
                    {
                        builder.assert_felt_eq(*prev_bit, *last_bit);
                    }
                });

                // Assert that if MemoryFinalize is not present, then the address bits are the same.
                builder.if_ne(contains_memory_finalize, C::N::one()).then(|builder| {
                    for (prev_bit, last_bit) in public_values
                        .previous_finalize_addr_bits
                        .iter()
                        .zip_eq(public_values.last_finalize_addr_bits.iter())
                    {
                        builder.assert_felt_eq(*prev_bit, *last_bit);
                    }
                });

                // Update the MemoryInitialize address bits.
                for (bit, pub_bit) in
                    current_init_addr_bits.iter().zip(public_values.last_init_addr_bits.iter())
                {
                    builder.assign(*bit, *pub_bit);
                }

                // Update the MemoryFinalize address bits.
                for (bit, pub_bit) in current_finalize_addr_bits
                    .iter()
                    .zip(public_values.last_finalize_addr_bits.iter())
                {
                    builder.assign(*bit, *pub_bit);
                }
            }

            // Digest constraints.
            {
                // If `committed_value_digest` is not zero, then
                // `public_values.committed_value_digest` should be the current value.
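                //
                // In other words: once the aggregated digest becomes nonzero it is frozen (every
                // later shard must commit the same value), and shards without a "CPU" chip cannot
                // change it, so the final value is the unique nonzero digest committed by a CPU
                // shard, or zero if no shard committed one.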
                let is_zero: Var<_> = builder.eval(C::N::one());
                #[allow(clippy::needless_range_loop)]
                for i in 0..committed_value_digest.len() {
                    for j in 0..WORD_SIZE {
                        let d = felt2var(builder, committed_value_digest[i][j]);
                        builder.if_ne(d, C::N::zero()).then(|builder| {
                            builder.assign(is_zero, C::N::zero());
                        });
                    }
                }
                builder.if_eq(is_zero, C::N::zero()).then(|builder| {
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..committed_value_digest.len() {
                        for j in 0..WORD_SIZE {
                            builder.assert_felt_eq(
                                committed_value_digest[i][j],
                                public_values.committed_value_digest[i][j],
                            );
                        }
                    }
                });

                // If it's not a shard with "CPU", then the committed value digest should not
                // change.
                builder.if_ne(contains_cpu, C::N::one()).then(|builder| {
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..committed_value_digest.len() {
                        for j in 0..WORD_SIZE {
                            builder.assert_felt_eq(
                                committed_value_digest[i][j],
                                public_values.committed_value_digest[i][j],
                            );
                        }
                    }
                });

                // Update the committed value digest.
                #[allow(clippy::needless_range_loop)]
                for i in 0..committed_value_digest.len() {
                    for j in 0..WORD_SIZE {
                        builder.assign(
                            committed_value_digest[i][j],
                            public_values.committed_value_digest[i][j],
                        );
                    }
                }

                // If `deferred_proofs_digest` is not zero, then
                // `public_values.deferred_proofs_digest` should be the current value.
                let is_zero: Var<_> = builder.eval(C::N::one());
                #[allow(clippy::needless_range_loop)]
                for i in 0..deferred_proofs_digest.len() {
                    let d = felt2var(builder, deferred_proofs_digest[i]);
                    builder.if_ne(d, C::N::zero()).then(|builder| {
                        builder.assign(is_zero, C::N::zero());
                    });
                }
                builder.if_eq(is_zero, C::N::zero()).then(|builder| {
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..deferred_proofs_digest.len() {
                        builder.assert_felt_eq(
                            deferred_proofs_digest[i],
                            public_values.deferred_proofs_digest[i],
                        );
                    }
                });

                // If it's not a shard with "CPU", then the deferred proofs digest should not
                // change.
                builder.if_ne(contains_cpu, C::N::one()).then(|builder| {
                    #[allow(clippy::needless_range_loop)]
                    for i in 0..deferred_proofs_digest.len() {
                        builder.assert_felt_eq(
                            deferred_proofs_digest[i],
                            public_values.deferred_proofs_digest[i],
                        );
                    }
                });

                // Update the deferred proofs digest.
                #[allow(clippy::needless_range_loop)]
                for i in 0..deferred_proofs_digest.len() {
                    builder
                        .assign(deferred_proofs_digest[i], public_values.deferred_proofs_digest[i]);
                }
            }

            // Range check the shard number to 16 bits so that the number of shards is not too
            // large.
            builder.range_check_f(public_values.shard, 16);

            // Update the reconstruct challenger.
            reconstruct_challenger.observe(builder, proof.commitment.main_commit.clone());
            for j in 0..machine.num_pv_elts() {
                let element = builder.get(&proof.public_values, j);
                reconstruct_challenger.observe(builder, element);
            }

            // Cumulative sum is updated by sums of all chips.
            let opened_values = proof.opened_values.chips;
            builder.range(0, opened_values.len()).for_each(|k, builder| {
                let values = builder.get(&opened_values, k);
                let sum = values.cumulative_sum;
                builder.assign(cumulative_sum, cumulative_sum + sum);
            });
        });

        // Write all values to the public values struct and commit to them.
        {
            // Compute the vk digest.
            let vk_digest = hash_vkey(builder, &vk);
            let vk_digest: [Felt<_>; DIGEST_SIZE] = array::from_fn(|i| builder.get(&vk_digest, i));

            // Collect the public values for challengers.
            let initial_challenger_public_values =
                get_challenger_public_values(builder, &initial_reconstruct_challenger);
            let final_challenger_public_values =
                get_challenger_public_values(builder, &reconstruct_challenger);

            // Collect the cumulative sum.
            let cumulative_sum_array = builder.ext2felt(cumulative_sum);
            let cumulative_sum_array = array::from_fn(|i| builder.get(&cumulative_sum_array, i));

            // Collect the deferred proof digests.
            let zero: Felt<_> = builder.eval(C::F::zero());
            let start_deferred_digest = [zero; POSEIDON_NUM_WORDS];
            let end_deferred_digest = [zero; POSEIDON_NUM_WORDS];

            // Collect the is_complete flag.
            let is_complete_felt = var2felt(builder, is_complete);

            // Initialize the public values we will commit to.
            let mut recursion_public_values_stream = [zero; RECURSIVE_PROOF_NUM_PV_ELTS];
            let recursion_public_values: &mut RecursionPublicValues<_> =
                recursion_public_values_stream.as_mut_slice().borrow_mut();
            recursion_public_values.committed_value_digest = committed_value_digest;
            recursion_public_values.deferred_proofs_digest = deferred_proofs_digest;
            recursion_public_values.start_pc = start_pc;
            recursion_public_values.next_pc = current_pc;
            recursion_public_values.start_shard = initial_shard;
            recursion_public_values.next_shard = current_shard;
            recursion_public_values.start_execution_shard = initial_execution_shard;
            recursion_public_values.next_execution_shard = current_execution_shard;
            recursion_public_values.previous_init_addr_bits = initial_previous_init_addr_bits;
            recursion_public_values.last_init_addr_bits = current_init_addr_bits;
            recursion_public_values.previous_finalize_addr_bits =
                initial_previous_finalize_addr_bits;
            recursion_public_values.last_finalize_addr_bits = current_finalize_addr_bits;
            recursion_public_values.sp1_vk_digest = vk_digest;
            recursion_public_values.leaf_challenger = leaf_challenger_public_values;
            recursion_public_values.start_reconstruct_challenger = initial_challenger_public_values;
            recursion_public_values.end_reconstruct_challenger = final_challenger_public_values;
            recursion_public_values.cumulative_sum = cumulative_sum_array;
            recursion_public_values.start_reconstruct_deferred_digest = start_deferred_digest;
            recursion_public_values.end_reconstruct_deferred_digest = end_deferred_digest;
            recursion_public_values.exit_code = exit_code;
            recursion_public_values.is_complete = is_complete_felt;

            // If the proof represents a complete proof, make completeness assertions.
            //
            // *Remark*: In this program, this only happens if there is one shard and the program
            // has no deferred proofs to verify. However, the completeness check is independent of
            // these facts.
            builder.if_eq(is_complete, C::N::one()).then(|builder| {
                assert_complete(builder, recursion_public_values, &reconstruct_challenger)
            });

            commit_public_values(builder, recursion_public_values);
        }
    }
}