sp1_hypercube/verifier/shard.rs

1use derive_where::derive_where;
2use slop_basefold::FriConfig;
3use slop_merkle_tree::MerkleTreeTcs;
4#[allow(clippy::disallowed_types)]
5use slop_stacked::{StackedBasefoldProof, StackedPcsVerifier};
6use slop_whir::{Verifier, WhirProofShape};
7use sp1_primitives::{SP1GlobalContext, SP1OuterGlobalContext};
8use std::{
9    collections::{BTreeMap, BTreeSet},
10    iter::once,
11    marker::PhantomData,
12    ops::Deref,
13};
14
15use itertools::Itertools;
16use slop_air::{Air, BaseAir};
17use slop_algebra::{AbstractField, PrimeField32, TwoAdicField};
18use slop_challenger::{CanObserve, FieldChallenger, IopCtx, VariableLengthChallenger};
19use slop_commit::Rounds;
20use slop_jagged::{JaggedPcsVerifier, JaggedPcsVerifierError};
21use slop_matrix::dense::RowMajorMatrixView;
22use slop_multilinear::{full_geq, Evaluations, Mle, MleEval, MultilinearPcsVerifier};
23use slop_sumcheck::{partially_verify_sumcheck_proof, SumcheckError};
24use thiserror::Error;
25
26use crate::{
27    air::MachineAir,
28    prover::{CoreProofShape, PcsProof, ZerocheckAir},
29    Chip, ChipOpenedValues, LogUpEvaluations, LogUpGkrVerifier, LogupGkrVerificationError, Machine,
30    ShardContext, ShardContextImpl, VerifierConstraintFolder, MAX_CONSTRAINT_DEGREE,
31    PROOF_MAX_NUM_PVS, SP1SC,
32};
33
34use super::{MachineVerifyingKey, ShardOpenedValues, ShardProof};
35
36/// The number of commitments in an SP1 shard proof, corresponding to the preprocessed and main
37/// commitments.
38pub const NUM_SP1_COMMITMENTS: usize = 2;
39
40/// The number of bits to grind in sampling the GKR randomness.
41pub const GKR_GRINDING_BITS: usize = 12;
42
43#[allow(clippy::disallowed_types)]
44/// The Multilinear PCS used in SP1 shard proofs, generic in the `IopCtx`.
45pub type SP1Pcs<GC> = StackedPcsVerifier<GC>;
46
47/// The PCS used for all stages of SP1 proving except for wrap.
48pub type SP1InnerPcs = SP1Pcs<SP1GlobalContext>;
49
50/// The PCS used for wrap proving.
51pub type SP1OuterPcs = SP1Pcs<SP1OuterGlobalContext>;
52
53/// The PCS proof type used in SP1 shard proofs.
54#[allow(clippy::disallowed_types)]
55pub type SP1PcsProof<GC> = StackedBasefoldProof<GC>;
56
57/// The proof type for all stages of SP1 proving except for wrap.
58pub type SP1PcsProofInner = SP1PcsProof<SP1GlobalContext>;
59
60/// The proof type for wrap proving.
61pub type SP1PcsProofOuter = SP1PcsProof<SP1OuterGlobalContext>;
62
/// A verifier for shard proofs.
///
/// Pairs the jagged PCS verifier (used to check the polynomial-commitment opening claims) with
/// the machine description whose chip constraints the shard proof must satisfy.
#[derive_where(Clone)]
pub struct ShardVerifier<GC: IopCtx, SC: ShardContext<GC>> {
    /// The jagged pcs verifier.
    pub jagged_pcs_verifier: JaggedPcsVerifier<GC, SC::Config>,
    /// The machine (the collection of chips/AIRs) being verified.
    pub machine: Machine<GC::F, SC::Air>,
}
71
/// An error that occurs during the verification of a shard proof.
#[derive(Debug, Error)]
pub enum ShardVerifierError<EF, PcsError> {
    /// The pcs opening proof is invalid.
    // NOTE(review): the variant name should be `InvalidOpeningArgument` (UpperCamelCase);
    // renaming would break the public API, so it is only flagged here.
    #[error("invalid pcs opening proof: {0}")]
    InvalidopeningArgument(#[from] JaggedPcsVerifierError<EF, PcsError>),
    /// The constraints check failed.
    #[error("constraints check failed: {0}")]
    ConstraintsCheckFailed(SumcheckError),
    /// The cumulative sums error.
    #[error("cumulative sums error: {0}")]
    CumulativeSumsError(&'static str),
    /// The preprocessed chip id mismatch.
    #[error("preprocessed chip id mismatch: {0}")]
    PreprocessedChipIdMismatch(String, String),
    /// The error to report when the preprocessed chip height in the verifying key does not match
    /// the chip opening height.
    #[error("preprocessed chip height mismatch: {0}")]
    PreprocessedChipHeightMismatch(String),
    /// The chip opening length mismatch.
    #[error("chip opening length mismatch")]
    ChipOpeningLengthMismatch,
    /// The cpu chip is missing.
    #[error("missing cpu chip")]
    MissingCpuChip,
    /// The shape of the openings does not match the expected shape.
    #[error("opening shape mismatch: {0}")]
    OpeningShapeMismatch(#[from] OpeningShapeError),
    /// The GKR verification failed.
    #[error("GKR verification failed: {0}")]
    GkrVerificationFailed(LogupGkrVerificationError<EF>),
    /// The public values verification failed.
    #[error("public values verification failed")]
    InvalidPublicValues,
    /// The proof has entries with invalid shape.
    #[error("invalid shape of proof")]
    InvalidShape,
    /// The provided chip opened values has incorrect order.
    #[error("invalid chip opening order: ({0}, {1})")]
    InvalidChipOrder(String, String),
    /// The height of the chip is not sent over correctly as bitwise decomposition.
    #[error("invalid height bit decomposition")]
    InvalidHeightBitDecomposition,
    /// The height is larger than `1 << max_log_row_count`.
    #[error("height is larger than maximum possible value")]
    HeightTooLarge,
}
119
/// Derive the error type from the jagged config.
///
/// Convenience alias: fixes the extension field and the PCS error type of
/// [`ShardVerifierError`] from a global context `GC` and a multilinear PCS config `C`.
pub type ShardVerifierConfigError<GC, C> =
    ShardVerifierError<<GC as IopCtx>::EF, <C as MultilinearPcsVerifier<GC>>::VerifierError>;
123
/// An error that occurs when the shape of the openings does not match the expected shape.
///
/// Both variants carry `(expected, actual)` widths, in that order (see `verify_opening_shape`).
#[derive(Debug, Error)]
pub enum OpeningShapeError {
    /// The width of the preprocessed trace does not match the expected width.
    #[error("preprocessed width mismatch: {0} != {1}")]
    PreprocessedWidthMismatch(usize, usize),
    /// The width of the main trace does not match the expected width.
    #[error("main width mismatch: {0} != {1}")]
    MainWidthMismatch(usize, usize),
}
134
135impl<GC: IopCtx, SC: ShardContext<GC>> ShardVerifier<GC, SC> {
136    /// Get a shard verifier from a jagged pcs verifier.
137    pub fn new(
138        pcs_verifier: JaggedPcsVerifier<GC, SC::Config>,
139        machine: Machine<GC::F, SC::Air>,
140    ) -> Self {
141        Self { jagged_pcs_verifier: pcs_verifier, machine }
142    }
143
144    /// Get the maximum log row count.
145    #[must_use]
146    #[inline]
147    pub fn max_log_row_count(&self) -> usize {
148        self.jagged_pcs_verifier.max_log_row_count
149    }
150
151    /// Get the machine.
152    #[must_use]
153    #[inline]
154    pub fn machine(&self) -> &Machine<GC::F, SC::Air> {
155        &self.machine
156    }
157
158    /// Get the log stacking height.
159    #[must_use]
160    #[inline]
161    pub fn log_stacking_height(&self) -> u32 {
162        <SC::Config>::log_stacking_height(&self.jagged_pcs_verifier.pcs_verifier)
163    }
164
165    /// Get a new challenger.
166    #[must_use]
167    #[inline]
168    pub fn challenger(&self) -> GC::Challenger {
169        self.jagged_pcs_verifier.challenger()
170    }
171
172    /// Get the shape of a shard proof.
173    pub fn shape_from_proof(
174        &self,
175        proof: &ShardProof<GC, PcsProof<GC, SC>>,
176    ) -> CoreProofShape<GC::F, SC::Air> {
177        let shard_chips = self
178            .machine()
179            .chips()
180            .iter()
181            .filter(|air| proof.opened_values.chips.keys().any(|k| k == air.name()))
182            .cloned()
183            .collect::<BTreeSet<_>>();
184        debug_assert_eq!(shard_chips.len(), proof.opened_values.chips.len());
185
186        let areas = proof
187            .evaluation_proof
188            .row_counts_and_column_counts
189            .iter()
190            .map(|rc_cc| rc_cc.iter().map(|(r, c)| r * c).sum::<usize>())
191            .collect::<Vec<_>>();
192        let preprocessed_area = areas[0];
193        let main_area = areas[1];
194
195        let added_columns: Vec<usize> = proof
196            .evaluation_proof
197            .row_counts_and_column_counts
198            .iter()
199            .map(|cc| cc[cc.len() - 2].1 + 1)
200            .collect();
201
202        CoreProofShape {
203            shard_chips,
204            preprocessed_area,
205            main_area,
206            preprocessed_padding_cols: added_columns[0],
207            main_padding_cols: added_columns[1],
208        }
209    }
210
211    /// Compute the padded row adjustment for a chip.
212    pub fn compute_padded_row_adjustment(
213        chip: &Chip<GC::F, SC::Air>,
214        alpha: GC::EF,
215        public_values: &[GC::F],
216    ) -> GC::EF
217where {
218        let dummy_preprocessed_trace = vec![GC::EF::zero(); chip.preprocessed_width()];
219        let dummy_main_trace = vec![GC::EF::zero(); chip.width()];
220
221        let mut folder = VerifierConstraintFolder::<GC::F, GC::EF> {
222            preprocessed: RowMajorMatrixView::new_row(&dummy_preprocessed_trace),
223            main: RowMajorMatrixView::new_row(&dummy_main_trace),
224            alpha,
225            accumulator: GC::EF::zero(),
226            public_values,
227            _marker: PhantomData,
228        };
229
230        chip.eval(&mut folder);
231
232        folder.accumulator
233    }
234
235    /// Evaluates the constraints for a chip and opening.
236    pub fn eval_constraints(
237        chip: &Chip<GC::F, SC::Air>,
238        opening: &ChipOpenedValues<GC::F, GC::EF>,
239        alpha: GC::EF,
240        public_values: &[GC::F],
241    ) -> GC::EF
242where {
243        let mut folder = VerifierConstraintFolder::<GC::F, GC::EF> {
244            preprocessed: RowMajorMatrixView::new_row(&opening.preprocessed.local),
245            main: RowMajorMatrixView::new_row(&opening.main.local),
246            alpha,
247            accumulator: GC::EF::zero(),
248            public_values,
249            _marker: PhantomData,
250        };
251
252        chip.eval(&mut folder);
253
254        folder.accumulator
255    }
256
257    fn verify_opening_shape(
258        chip: &Chip<GC::F, SC::Air>,
259        opening: &ChipOpenedValues<GC::F, GC::EF>,
260    ) -> Result<(), OpeningShapeError> {
261        // Verify that the preprocessed width matches the expected value for the chip.
262        if opening.preprocessed.local.len() != chip.preprocessed_width() {
263            return Err(OpeningShapeError::PreprocessedWidthMismatch(
264                chip.preprocessed_width(),
265                opening.preprocessed.local.len(),
266            ));
267        }
268
269        // Verify that the main width matches the expected value for the chip.
270        if opening.main.local.len() != chip.width() {
271            return Err(OpeningShapeError::MainWidthMismatch(
272                chip.width(),
273                opening.main.local.len(),
274            ));
275        }
276
277        Ok(())
278    }
279}
280
impl<GC: IopCtx, SC: ShardContext<GC>> ShardVerifier<GC, SC>
where
    GC::F: PrimeField32,
{
    /// Verify the zerocheck proof.
    ///
    /// Checks that the sumcheck-reduced zerocheck claim is consistent with (1) the chip
    /// constraints evaluated at the opened values and (2) the evaluation claims coming out of
    /// the LogUp GKR proof, then partially verifies the sumcheck itself and observes the
    /// openings into the Fiat-Shamir transcript.
    ///
    /// NOTE(review): the sample/observe order below defines the Fiat-Shamir transcript and must
    /// match the prover exactly — do not reorder any challenger interaction.
    ///
    /// # Errors
    ///
    /// Returns a [`ShardVerifierError`] if any shape, bit-decomposition, RLC-consistency, or
    /// sumcheck check fails.
    #[allow(clippy::too_many_arguments)]
    #[allow(clippy::type_complexity)]
    pub fn verify_zerocheck(
        &self,
        shard_chips: &BTreeSet<Chip<GC::F, SC::Air>>,
        opened_values: &ShardOpenedValues<GC::F, GC::EF>,
        gkr_evaluations: &LogUpEvaluations<GC::EF>,
        proof: &ShardProof<GC, PcsProof<GC, SC>>,
        public_values: &[GC::F],
        challenger: &mut GC::Challenger,
    ) -> Result<
        (),
        ShardVerifierError<GC::EF, <SC::Config as MultilinearPcsVerifier<GC>>::VerifierError>,
    >
where {
        let max_log_row_count = self.jagged_pcs_verifier.max_log_row_count;

        // Get the random challenge to merge the constraints.
        let alpha = challenger.sample_ext_element::<GC::EF>();

        // Challenge used to batch the trace-column openings into a single claim.
        let gkr_batch_open_challenge = challenger.sample_ext_element::<GC::EF>();

        // Get the random lambda to RLC the zerocheck polynomials.
        let lambda = challenger.sample_ext_element::<GC::EF>();

        // Both the GKR evaluation point and the sumcheck's reduced point must live on the
        // `max_log_row_count`-dimensional hypercube.
        if gkr_evaluations.point.dimension() != max_log_row_count
            || proof.zerocheck_proof.point_and_eval.0.dimension() != max_log_row_count
        {
            return Err(ShardVerifierError::InvalidShape);
        }

        // Get the value of eq(zeta, sumcheck's reduced point).
        let zerocheck_eq_val = Mle::full_lagrange_eval(
            &gkr_evaluations.point,
            &proof.zerocheck_proof.point_and_eval.0,
        );

        // To verify the constraints, we need to check that the RLC'ed reduced eval in the zerocheck
        // proof is correct.
        let mut rlc_eval = GC::EF::zero();
        for (chip, (chip_name, openings)) in shard_chips.iter().zip_eq(opened_values.chips.iter()) {
            // Caller guarantees chips and opened values are in matching order.
            assert_eq!(chip.name(), chip_name);
            // Verify the shape of the opening arguments matches the expected values.
            Self::verify_opening_shape(chip, openings)?;

            // Extend the sumcheck point by one coordinate for the geq comparison below.
            let mut point_extended = proof.zerocheck_proof.point_and_eval.0.clone();
            point_extended.add_dimension(GC::EF::zero());
            // Each claimed degree coordinate must be a bit: x * (x - 1) == 0.
            for &x in openings.degree.iter() {
                if x * (x - GC::F::one()) != GC::F::zero() {
                    return Err(ShardVerifierError::InvalidHeightBitDecomposition);
                }
            }
            // If the top bit is set, no other bit may be set — otherwise the height would
            // exceed `1 << max_log_row_count`.
            for &x in openings.degree.iter().skip(1) {
                if x * *openings.degree.first().unwrap() != GC::F::zero() {
                    return Err(ShardVerifierError::HeightTooLarge);
                }
            }

            let geq_val = full_geq(&openings.degree, &point_extended);

            // Subtract the contribution that padding rows make to the constraint evaluation.
            let padded_row_adjustment =
                Self::compute_padded_row_adjustment(chip, alpha, public_values);

            let constraint_eval = Self::eval_constraints(chip, openings, alpha, public_values)
                - padded_row_adjustment * geq_val;

            // Batch the main and preprocessed openings with powers of the batching challenge
            // (starting from the first power, not the zeroth).
            let openings_batch = openings
                .main
                .local
                .iter()
                .chain(openings.preprocessed.local.iter())
                .copied()
                .zip(gkr_batch_open_challenge.powers().skip(1))
                .map(|(opening, power)| opening * power)
                .sum::<GC::EF>();

            // Horner's method.
            rlc_eval = rlc_eval * lambda + zerocheck_eq_val * (constraint_eval + openings_batch);
        }

        // The reduced evaluation claimed by the sumcheck must equal the recomputed RLC.
        if proof.zerocheck_proof.point_and_eval.1 != rlc_eval {
            return Err(ShardVerifierError::<
                _,
                <SC::Config as MultilinearPcsVerifier<GC>>::VerifierError,
            >::ConstraintsCheckFailed(SumcheckError::InconsistencyWithEval));
        }

        // Recompute, per chip, the batched GKR evaluation claims using the same challenge
        // powers as `openings_batch` above.
        let zerocheck_sum_modifications_from_gkr = gkr_evaluations
            .chip_openings
            .values()
            .map(|chip_evaluation| {
                chip_evaluation
                    .main_trace_evaluations
                    .deref()
                    .iter()
                    .copied()
                    .chain(
                        chip_evaluation
                            .preprocessed_trace_evaluations
                            .as_ref()
                            .iter()
                            .flat_map(|&evals| evals.deref().iter().copied()),
                    )
                    .zip(gkr_batch_open_challenge.powers().skip(1))
                    .map(|(opening, power)| opening * power)
                    .sum::<GC::EF>()
            })
            .collect::<Vec<_>>();

        // Fold the per-chip modifications with lambda (Horner), mirroring the rlc_eval fold.
        let zerocheck_sum_modification = zerocheck_sum_modifications_from_gkr
            .iter()
            .fold(GC::EF::zero(), |acc, modification| lambda * acc + *modification);

        // Verify that the rlc claim matches the random linear combination of evaluation claims from
        // gkr.
        if proof.zerocheck_proof.claimed_sum != zerocheck_sum_modification {
            return Err(ShardVerifierError::<
                _,
                <SC::Config as MultilinearPcsVerifier<GC>>::VerifierError,
            >::ConstraintsCheckFailed(
                SumcheckError::InconsistencyWithClaimedSum
            ));
        }

        // Verify the zerocheck proof.
        partially_verify_sumcheck_proof(
            &proof.zerocheck_proof,
            challenger,
            max_log_row_count,
            MAX_CONSTRAINT_DEGREE + 1,
        )
        .map_err(|e| {
            ShardVerifierError::<
                _,
                <SC::Config as MultilinearPcsVerifier<GC>>::VerifierError,
            >::ConstraintsCheckFailed(e)
        })?;

        // Observe the openings into the transcript (count first, then each chip's values).
        let len = shard_chips.len();
        challenger.observe(GC::F::from_canonical_usize(len));
        for (_, opening) in opened_values.chips.iter() {
            challenger.observe_variable_length_extension_slice(&opening.preprocessed.local);
            challenger.observe_variable_length_extension_slice(&opening.main.local);
        }

        Ok(())
    }

    /// Verify a shard proof.
    ///
    /// End-to-end verification of a single shard: public-value checks, transcript observations,
    /// chip-set/shape validation against the machine, the LogUp GKR proof, the zerocheck proof,
    /// the jagged PCS opening proof, and finally cross-checks between the jagged proof's
    /// row/column counts and the opened chip degrees.
    ///
    /// # Errors
    ///
    /// Returns a [`ShardVerifierError`] describing the first check that fails.
    #[allow(clippy::too_many_lines)]
    pub fn verify_shard(
        &self,
        vk: &MachineVerifyingKey<GC>,
        proof: &ShardProof<GC, PcsProof<GC, SC>>,
        challenger: &mut GC::Challenger,
    ) -> Result<(), ShardVerifierConfigError<GC, SC::Config>>
where {
        let ShardProof {
            main_commitment,
            opened_values,
            evaluation_proof,
            zerocheck_proof,
            public_values,
            logup_gkr_proof,
        } = proof;

        let max_log_row_count = self.jagged_pcs_verifier.max_log_row_count;

        // The public-values slice must be exactly the padded length and at least as long as
        // the machine's declared public-value count.
        if public_values.len() != PROOF_MAX_NUM_PVS
            || public_values.len() < self.machine.num_pv_elts()
        {
            tracing::error!("invalid public values length: {}", public_values.len());
            return Err(ShardVerifierError::InvalidPublicValues);
        }

        // Padding beyond the machine's public values must be all zeros.
        if public_values[self.machine.num_pv_elts()..].iter().any(|v| *v != GC::F::zero()) {
            return Err(ShardVerifierError::InvalidPublicValues);
        }
        let shard_chips = opened_values.chips.keys().cloned().collect::<BTreeSet<_>>();

        // Observe the public values.
        challenger.observe_constant_length_extension_slice(public_values);
        // Observe the main commitment.
        challenger.observe(*main_commitment);
        // Observe the number of chips.
        let shard_chips_len = shard_chips.len();
        challenger.observe(GC::F::from_canonical_usize(shard_chips_len));

        // Reconstruct each chip's height from its claimed bit decomposition (big-endian fold)
        // and observe the height plus the chip name into the transcript.
        let mut heights: BTreeMap<String, GC::F> = BTreeMap::new();
        for (name, chip_values) in opened_values.chips.iter() {
            if chip_values.degree.len() != max_log_row_count + 1 || chip_values.degree.len() >= 30 {
                return Err(ShardVerifierError::InvalidShape);
            }
            let acc =
                chip_values.degree.iter().fold(GC::F::zero(), |acc, &x| x + GC::F::two() * acc);
            heights.insert(name.clone(), acc);
            challenger.observe(acc);
            challenger.observe(GC::F::from_canonical_usize(name.len()));
            for byte in name.as_bytes() {
                challenger.observe(GC::F::from_canonical_u8(*byte));
            }
        }

        let machine_chip_names =
            self.machine.chips().iter().map(|c| c.name().to_string()).collect::<BTreeSet<_>>();

        let preprocessed_chips = self
            .machine
            .chips()
            .iter()
            .filter(|chip| chip.preprocessed_width() != 0)
            .collect::<BTreeSet<_>>();

        // Check:
        // 1. All shard chips in the proof are expected from the machine configuration.
        // 2. All chips with non-zero preprocessed width in the machine configuration appear in
        //  the proof.
        // 3. The preprocessed widths as deduced from the jagged proof exactly match those
        // expected from the machine configuration.
        if !shard_chips.is_subset(&machine_chip_names)
            || !preprocessed_chips
                .iter()
                .map(|chip| chip.name().to_string())
                .collect::<BTreeSet<_>>()
                .is_subset(&shard_chips)
            || evaluation_proof.row_counts_and_column_counts[0]
                .iter()
                .map(|&(_, c)| c)
                .take(preprocessed_chips.len())
                .collect::<Vec<_>>()
                != preprocessed_chips
                    .iter()
                    .map(|chip| chip.preprocessed_width())
                    .collect::<Vec<_>>()
        {
            return Err(ShardVerifierError::InvalidShape);
        }

        // Rebind `shard_chips` from names to the actual machine chips, preserving order.
        let shard_chips = self
            .machine
            .chips()
            .iter()
            .filter(|chip| shard_chips.contains(chip.name()))
            .cloned()
            .collect::<BTreeSet<_>>();

        // Every proof chip name must resolve to a distinct machine chip, and the set is
        // non-empty.
        if shard_chips.len() != shard_chips_len || shard_chips_len == 0 {
            return Err(ShardVerifierError::InvalidShape);
        }

        // The chip set must be one of the machine shape's allowed clusters.
        if !self.machine().shape().chip_clusters.contains(&shard_chips) {
            return Err(ShardVerifierError::InvalidShape);
        }

        let degrees = opened_values
            .chips
            .iter()
            .map(|x| (x.0.clone(), x.1.degree.clone()))
            .collect::<BTreeMap<_, _>>();

        // Opened values, degrees, and GKR chip openings must all cover the same chip set.
        if shard_chips.len() != opened_values.chips.len()
            || shard_chips.len() != degrees.len()
            || shard_chips.len() != logup_gkr_proof.logup_evaluations.chip_openings.len()
        {
            return Err(ShardVerifierError::InvalidShape);
        }

        // Verify the per-chip ordering and the widths of the GKR evaluation claims.
        for ((shard_chip, (chip_name, _)), (gkr_chip_name, gkr_opened_values)) in shard_chips
            .iter()
            .zip_eq(opened_values.chips.iter())
            .zip_eq(logup_gkr_proof.logup_evaluations.chip_openings.iter())
        {
            if shard_chip.name() != chip_name.as_str() {
                return Err(ShardVerifierError::InvalidChipOrder(
                    shard_chip.name().to_string(),
                    chip_name.clone(),
                ));
            }
            if shard_chip.name() != gkr_chip_name.as_str() {
                return Err(ShardVerifierError::InvalidChipOrder(
                    shard_chip.name().to_string(),
                    gkr_chip_name.clone(),
                ));
            }

            // Preprocessed evaluation width (0 when absent) must match the chip.
            if gkr_opened_values
                .preprocessed_trace_evaluations
                .as_ref()
                .map_or(0, MleEval::num_polynomials)
                != shard_chip.preprocessed_width()
            {
                return Err(ShardVerifierError::InvalidShape);
            }

            if gkr_opened_values.main_trace_evaluations.len() != shard_chip.width() {
                return Err(ShardVerifierError::InvalidShape);
            }
        }

        // Verify the logup GKR proof.
        LogUpGkrVerifier::<GC, SC>::verify_logup_gkr(
            &shard_chips,
            &degrees,
            max_log_row_count,
            logup_gkr_proof,
            public_values,
            challenger,
        )
        .map_err(ShardVerifierError::GkrVerificationFailed)?;

        // Verify the zerocheck proof.
        self.verify_zerocheck(
            &shard_chips,
            opened_values,
            &logup_gkr_proof.logup_evaluations,
            proof,
            public_values,
            challenger,
        )?;

        // Verify the opening proof.
        // `preprocessed_openings_for_proof` is `Vec` of preprocessed `AirOpenedValues` of chips.
        // `main_openings_for_proof` is `Vec` of main `AirOpenedValues` of chips.
        let (preprocessed_openings_for_proof, main_openings_for_proof): (Vec<_>, Vec<_>) = proof
            .opened_values
            .chips
            .values()
            .map(|opening| (opening.preprocessed.clone(), opening.main.clone()))
            .unzip();

        // `preprocessed_openings` is the `Vec` of preprocessed openings of all chips.
        let preprocessed_openings = preprocessed_openings_for_proof
            .iter()
            .map(|x| x.local.iter().as_slice())
            .collect::<Vec<_>>();

        // `main_openings` is the `Evaluations` derived by collecting all the main openings.
        let main_openings = main_openings_for_proof
            .iter()
            .map(|x| x.local.iter().copied().collect::<MleEval<_>>())
            .collect::<Evaluations<_>>();

        // `filtered_preprocessed_openings` is the `Evaluations` derived by collecting all the
        // non-empty preprocessed openings.
        let filtered_preprocessed_openings = preprocessed_openings
            .into_iter()
            .filter(|x| !x.is_empty())
            .map(|x| x.iter().copied().collect::<MleEval<_>>())
            .collect::<Evaluations<_>>();

        // Two commitment rounds: preprocessed (from the verifying key) and main.
        let (commitments, openings) = (
            vec![vk.preprocessed_commit, *main_commitment],
            Rounds { rounds: vec![filtered_preprocessed_openings, main_openings] },
        );

        // Flatten each round's per-chip evaluations into a single `MleEval` per round.
        let flattened_openings = openings
            .into_iter()
            .map(|round| {
                round
                    .into_iter()
                    .flat_map(std::iter::IntoIterator::into_iter)
                    .collect::<MleEval<_>>()
            })
            .collect::<Vec<_>>();

        self.jagged_pcs_verifier
            .verify_trusted_evaluations(
                &commitments,
                zerocheck_proof.point_and_eval.0.clone(),
                flattened_openings.as_slice(),
                evaluation_proof,
                challenger,
            )
            .map_err(ShardVerifierError::InvalidopeningArgument)?;

        // Extract the per-round row counts from the jagged proof shape.
        let [mut preprocessed_row_counts, mut main_row_counts]: [Vec<usize>; 2] = proof
            .evaluation_proof
            .row_counts_and_column_counts
            .clone()
            .into_iter()
            .map(|r_c| r_c.into_iter().map(|(r, _)| r).collect::<Vec<_>>())
            .collect::<Vec<_>>()
            .try_into()
            .unwrap();

        // Remove the last two row counts because we add the padding columns as two extra
        // tables.
        for _ in 0..2 {
            preprocessed_row_counts.pop();
            main_row_counts.pop();
        }

        let mut preprocessed_chip_degrees = vec![];
        let mut main_chip_degrees = vec![];

        // Recompute each chip's height from its degree bit string.
        for chip in shard_chips.iter() {
            if chip.preprocessed_width() > 0 {
                preprocessed_chip_degrees.push(
                    proof.opened_values.chips[chip.name()]
                        .degree
                        .bit_string_evaluation()
                        .as_canonical_u32(),
                );
            }
            main_chip_degrees.push(
                proof.opened_values.chips[chip.name()]
                    .degree
                    .bit_string_evaluation()
                    .as_canonical_u32(),
            );
        }

        // Check that the row counts in the jagged proof match the chip degrees in the
        // `ChipOpenedValues` struct.
        for (chip_opening_row_counts, proof_row_counts) in
            [preprocessed_chip_degrees, main_chip_degrees]
                .iter()
                .zip_eq([preprocessed_row_counts, main_row_counts].iter())
        {
            if proof_row_counts.len() != chip_opening_row_counts.len() {
                return Err(ShardVerifierError::InvalidShape);
            }
            for (a, b) in proof_row_counts.iter().zip(chip_opening_row_counts.iter()) {
                if *a != *b as usize {
                    return Err(ShardVerifierError::InvalidShape);
                }
            }
        }

        // Check that the shape of the proof struct column counts matches the shape of the shard
        // chips. In the future, we may allow for a layer of abstraction where the proof row
        // counts and column counts can be separate from the machine chips (e.g. if two
        // chips in a row have the same height, the proof could have the column counts
        // merged).
        if !proof
            .evaluation_proof
            .row_counts_and_column_counts
            .iter()
            .cloned()
            .zip(
                once(
                    shard_chips
                        .iter()
                        .map(MachineAir::<GC::F>::preprocessed_width)
                        .filter(|&width| width > 0)
                        .collect::<Vec<_>>(),
                )
                .chain(once(shard_chips.iter().map(Chip::width).collect())),
            )
            // The jagged verifier has already checked that `a.len()>=2`, so this indexing is safe.
            .all(|(a, b)| a[..a.len() - 2].iter().map(|(_, c)| *c).collect::<Vec<_>>() == b)
        {
            Err(ShardVerifierError::InvalidShape)
        } else {
            Ok(())
        }
    }
}
745
746impl<GC: IopCtx<F: TwoAdicField, EF: TwoAdicField>, A> ShardVerifier<GC, SP1SC<GC, A>>
747where
748    A: ZerocheckAir<GC::F, GC::EF>,
749    GC::F: PrimeField32,
750{
751    /// Create a shard verifier from basefold parameters.
752    #[must_use]
753    pub fn from_basefold_parameters(
754        fri_config: FriConfig<GC::F>,
755        log_stacking_height: u32,
756        max_log_row_count: usize,
757        machine: Machine<GC::F, A>,
758    ) -> Self {
759        let pcs_verifier = JaggedPcsVerifier::<GC, SP1Pcs<GC>>::new_from_basefold_params(
760            fri_config,
761            log_stacking_height,
762            max_log_row_count,
763            NUM_SP1_COMMITMENTS,
764        );
765        Self { jagged_pcs_verifier: pcs_verifier, machine }
766    }
767}
768
769impl<GC: IopCtx<F: TwoAdicField, EF: TwoAdicField>, A>
770    ShardVerifier<GC, ShardContextImpl<GC, Verifier<GC>, A>>
771where
772    A: ZerocheckAir<GC::F, GC::EF>,
773    GC::F: PrimeField32,
774{
775    /// Create a shard verifier from basefold parameters.
776    #[must_use]
777    pub fn from_config(
778        config: &WhirProofShape<GC::F>,
779        max_log_row_count: usize,
780        machine: Machine<GC::F, A>,
781        num_expected_commitments: usize,
782        challenger: &mut GC::Challenger,
783    ) -> Self {
784        let merkle_verifier = MerkleTreeTcs::default();
785        let verifier = Verifier::<GC>::new(
786            merkle_verifier,
787            config.clone(),
788            num_expected_commitments,
789            challenger,
790        );
791
792        let jagged_verifier =
793            JaggedPcsVerifier::<GC, Verifier<GC>>::new(verifier, max_log_row_count);
794        Self { jagged_pcs_verifier: jagged_verifier, machine }
795    }
796}