p3_circle/pcs.rs

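//! A polynomial commitment scheme over the circle group: matrices are committed as
//! circle-code LDEs (via CFFT extrapolation), openings are reduced to one column per
//! height with DEEP quotients, a lambda term is extracted to account for the CFFT
//! dimension gap, and the result is folded once bivariately before running circle FRI.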
use alloc::collections::BTreeMap;
use alloc::vec;
use alloc::vec::Vec;
use core::marker::PhantomData;

use itertools::{Itertools, izip};
use p3_challenger::{CanObserve, FieldChallenger, GrindingChallenger};
use p3_commit::{BatchOpening, BatchOpeningRef, Mmcs, OpenedValues, Pcs, PolynomialSpace};
use p3_field::extension::ComplexExtendable;
use p3_field::{ExtensionField, Field};
use p3_fri::FriParameters;
use p3_fri::verifier::FriError;
use p3_matrix::dense::{RowMajorMatrix, RowMajorMatrixCow};
use p3_matrix::row_index_mapped::RowIndexMappedView;
use p3_matrix::{Dimensions, Matrix};
use p3_maybe_rayon::prelude::*;
use p3_util::log2_strict_usize;
use p3_util::zip_eq::zip_eq;
use serde::{Deserialize, Serialize};
use thiserror::Error;
use tracing::info_span;

use crate::deep_quotient::{deep_quotient_reduce_row, extract_lambda};
use crate::domain::CircleDomain;
use crate::folding::{CircleFriFolding, CircleFriFoldingForMmcs, fold_y, fold_y_row};
use crate::point::Point;
use crate::prover::prove;
use crate::verifier::verify;
use crate::{CfftPerm, CfftPermutable, CircleEvaluations, CircleFriProof, cfft_permute_index};

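/// A polynomial commitment scheme over the circle group, for base fields implementing
/// `ComplexExtendable` (e.g. Mersenne-31). Traces are committed with `mmcs` and opened
/// with a circle-FRI low-degree test configured by `fri_params`.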
#[derive(Debug)]
pub struct CirclePcs<Val: Field, InputMmcs, FriMmcs> {
    pub mmcs: InputMmcs,
    pub fri_params: FriParameters<FriMmcs>,
    pub _phantom: PhantomData<Val>,
}

impl<Val: Field, InputMmcs, FriMmcs> CirclePcs<Val, InputMmcs, FriMmcs> {
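    /// Create a new `CirclePcs` from an input MMCS and a set of FRI parameters.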
    pub const fn new(mmcs: InputMmcs, fri_params: FriParameters<FriMmcs>) -> Self {
        Self {
            mmcs,
            fri_params,
            _phantom: PhantomData,
        }
    }
}

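/// The input openings bundled into each circle-FRI query: the batch openings of the
/// committed input matrices, plus the sibling values and opening proof for the
/// first-layer (bivariate fold) commitment.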
#[derive(Serialize, Deserialize, Clone)]
#[serde(bound = "")]
pub struct CircleInputProof<
    Val: Field,
    Challenge: Field,
    InputMmcs: Mmcs<Val>,
    FriMmcs: Mmcs<Challenge>,
> {
    input_openings: Vec<BatchOpening<Val, InputMmcs>>,
    first_layer_siblings: Vec<Challenge>,
    first_layer_proof: FriMmcs::Proof,
}

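/// Errors that can arise while verifying the input openings of a circle-FRI query.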
#[derive(Debug, Error)]
pub enum InputError<InputMmcsError, FriMmcsError>
where
    InputMmcsError: core::fmt::Debug,
    FriMmcsError: core::fmt::Debug,
{
    #[error("input MMCS error: {0:?}")]
    InputMmcsError(InputMmcsError),
    #[error("first layer MMCS error: {0:?}")]
    FirstLayerMmcsError(FriMmcsError),
    #[error("input shape error: mismatched dimensions")]
    InputShapeError,
}

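/// An opening proof produced by [`CirclePcs`]: the commitment to the first-layer
/// (bivariately folded) columns, one lambda correction per height, and the circle-FRI
/// proof carrying a [`CircleInputProof`] per query.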
#[derive(Serialize, Deserialize, Clone)]
#[serde(bound(
    serialize = "Witness: Serialize",
    deserialize = "Witness: Deserialize<'de>"
))]
pub struct CirclePcsProof<
    Val: Field,
    Challenge: Field,
    InputMmcs: Mmcs<Val>,
    FriMmcs: Mmcs<Challenge>,
    Witness,
> {
    first_layer_commitment: FriMmcs::Commitment,
    lambdas: Vec<Challenge>,
    fri_proof: CircleFriProof<
        Challenge,
        FriMmcs,
        Witness,
        CircleInputProof<Val, Challenge, InputMmcs, FriMmcs>,
    >,
}

impl<Val, InputMmcs, FriMmcs, Challenge, Challenger> Pcs<Challenge, Challenger>
    for CirclePcs<Val, InputMmcs, FriMmcs>
where
    Val: ComplexExtendable,
    Challenge: ExtensionField<Val>,
    InputMmcs: Mmcs<Val>,
    FriMmcs: Mmcs<Challenge>,
    Challenger: FieldChallenger<Val> + GrindingChallenger + CanObserve<FriMmcs::Commitment>,
{
    type Domain = CircleDomain<Val>;
    type Commitment = InputMmcs::Commitment;
    type ProverData = InputMmcs::ProverData<RowMajorMatrix<Val>>;
    type EvaluationsOnDomain<'a> = RowIndexMappedView<CfftPerm, RowMajorMatrixCow<'a, Val>>;
    type Proof = CirclePcsProof<Val, Challenge, InputMmcs, FriMmcs, Challenger::Witness>;
    type Error = FriError<FriMmcs::Error, InputError<InputMmcs::Error, FriMmcs::Error>>;
    const ZK: bool = false;

    fn natural_domain_for_degree(&self, degree: usize) -> Self::Domain {
        CircleDomain::standard(log2_strict_usize(degree))
    }

    fn commit(
        &self,
        evaluations: impl IntoIterator<Item = (Self::Domain, RowMajorMatrix<Val>)>,
    ) -> (Self::Commitment, Self::ProverData) {
        let ldes = evaluations
            .into_iter()
            .map(|(domain, evals)| {
                assert!(
                    domain.log_n >= 2,
                    "CirclePcs cannot commit to a matrix with fewer than 4 rows.",
                    // (the bivariate first-layer fold consumes one bit of height, and FRI needs
                    // at least one more)
                );
                CircleEvaluations::from_natural_order(domain, evals)
                    .extrapolate(CircleDomain::standard(
                        domain.log_n + self.fri_params.log_blowup,
                    ))
                    .to_cfft_order()
            })
            .collect_vec();
        self.mmcs.commit(ldes)
    }

    fn get_quotient_ldes(
        &self,
        evaluations: impl IntoIterator<Item = (Self::Domain, RowMajorMatrix<Val>)>,
        _num_chunks: usize,
    ) -> Vec<RowMajorMatrix<Val>> {
        evaluations
            .into_iter()
            .map(|(domain, evals)| {
                assert!(
                    domain.log_n >= 2,
                    "CirclePcs cannot commit to a matrix with fewer than 4 rows.",
                    // (the bivariate first-layer fold consumes one bit of height, and FRI needs
                    // at least one more)
                );
                CircleEvaluations::from_natural_order(domain, evals)
                    .extrapolate(CircleDomain::standard(
                        domain.log_n + self.fri_params.log_blowup,
                    ))
                    .to_cfft_order()
            })
            .collect_vec()
    }

    fn commit_ldes(&self, ldes: Vec<RowMajorMatrix<Val>>) -> (Self::Commitment, Self::ProverData) {
        self.mmcs.commit(ldes)
    }

    fn get_evaluations_on_domain<'a>(
        &self,
        data: &'a Self::ProverData,
        idx: usize,
        domain: Self::Domain,
    ) -> Self::EvaluationsOnDomain<'a> {
        let mat = self.mmcs.get_matrices(data)[idx].as_view();
        let committed_domain = CircleDomain::standard(log2_strict_usize(mat.height()));
        if domain == committed_domain {
            mat.as_cow().cfft_perm_rows()
        } else {
            CircleEvaluations::from_cfft_order(committed_domain, mat)
                .extrapolate(domain)
                .to_cfft_order()
                .as_cow()
                .cfft_perm_rows()
        }
    }

    fn open(
        &self,
        // For each round,
        rounds: Vec<(
            &Self::ProverData,
            // for each matrix,
            Vec<
                // points to open
                Vec<Challenge>,
            >,
        )>,
        challenger: &mut Challenger,
    ) -> (OpenedValues<Challenge>, Self::Proof) {
        // Open matrices at points
        let values: OpenedValues<Challenge> = rounds
            .iter()
            .map(|(data, points_for_mats)| {
                let mats = self.mmcs.get_matrices(data);
                debug_assert_eq!(
                    mats.len(),
                    points_for_mats.len(),
                    "Mismatched number of matrices and points"
                );
                izip!(mats, points_for_mats)
                    .map(|(mat, points_for_mat)| {
                        let log_height = log2_strict_usize(mat.height());
                        // It was committed in cfft order.
                        let evals = CircleEvaluations::from_cfft_order(
                            CircleDomain::standard(log_height),
                            mat.as_view(),
                        );
                        points_for_mat
                            .iter()
                            .map(|&zeta| {
                                let zeta = Point::from_projective_line(zeta);
                                let ps_at_zeta =
                                    info_span!("compute opened values with Lagrange interpolation")
                                        .in_scope(|| evals.evaluate_at_point(zeta));
                                challenger.observe_algebra_slice(&ps_at_zeta);
                                ps_at_zeta
                            })
                            .collect()
                    })
                    .collect()
            })
            .collect();

        // Batch combination challenge
        let alpha: Challenge = challenger.sample_algebra_element();

        /*
        We are reducing columns ("ro" = reduced opening) with powers of alpha:
          ro = .. + α^n c_n + α^(n+1) c_(n+1) + ..
        But we want to precompute only small powers of alpha, and batch the columns per matrix:
          ro = .. + α^n (α^0 c_n + α^1 c_(n+1) + ..) + ..
        reusing α^0, α^1, etc., then at the end of each column batch we multiply by α^n.
        (Due to circle STARK specifics, we need 2 powers of α for each column, so the factor is
        actually α^(2n).)
        We store this α^(2n), the running reducing factor per log_height, and call it the "alpha offset".
        */
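        // Schematically, for a first matrix of width 2 (columns c_0, c_1; two α powers per
        // column) followed by a width-1 matrix (column c_2) at the same height, a single
        // point's reduction looks like
        //   ro = (α^0 q_0 + α^1 q_0' + α^2 q_1 + α^3 q_1') + α^4 (α^0 q_2 + α^1 q_2'),
        // where q_i, q_i' stand for the two deep-quotient terms contributed by column c_i,
        // and α^4 = α^(2·2) is the stored alpha offset scaling the second batch.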

        // log_height -> (alpha offset, reduced openings column)
        let mut reduced_openings: BTreeMap<usize, (Challenge, Vec<Challenge>)> = BTreeMap::new();

        rounds
            .iter()
            .zip(values.iter())
            .for_each(|((data, points_for_mats), values)| {
                let mats = self.mmcs.get_matrices(data);
                izip!(mats, points_for_mats, values).for_each(|(mat, points_for_mat, values)| {
                    let log_height = log2_strict_usize(mat.height());
                    // It was committed in cfft order.
                    let evals = CircleEvaluations::from_cfft_order(
                        CircleDomain::standard(log_height),
                        mat.as_view(),
                    );

                    let (alpha_offset, reduced_opening_for_log_height) =
                        reduced_openings.entry(log_height).or_insert_with(|| {
                            (Challenge::ONE, vec![Challenge::ZERO; 1 << log_height])
                        });

                    points_for_mat
                        .iter()
                        .zip(values.iter())
                        .for_each(|(&zeta, ps_at_zeta)| {
                            let zeta = Point::from_projective_line(zeta);

                            // Reduce this matrix, as a deep quotient, into one column with powers of α.
                            let mat_ros = evals.deep_quotient_reduce(alpha, zeta, ps_at_zeta);

                            // Fold it into our running reduction, offset by alpha_offset.
                            reduced_opening_for_log_height
                                .par_iter_mut()
                                .zip(mat_ros)
                                .for_each(|(ro, mat_ro)| {
                                    *ro += *alpha_offset * mat_ro;
                                });

                            // Update alpha_offset from α^i -> α^(i + 2 * width)
                            *alpha_offset *= alpha.exp_u64(2 * evals.values.width() as u64);
                        });
                });
            });

        // Iterate over our reduced columns and extract lambda, the multiple of the vanishing
        // polynomial which may appear in the reduced quotient due to the CFFT dimension gap.

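        // Schematically, each reduced column decomposes as ro(x) = g(x) + λ·v_n(x), with g in
        // the LDE code and v_n the vanishing polynomial of the original (pre-blowup) domain;
        // `extract_lambda` recovers λ and strips the λ·v_n term, and `verify` re-applies the
        // matching per-query correction (`ro - lambda * p.v_n(orig_size)`).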
        let mut lambdas = vec![];
        let mut log_heights = vec![];
        let first_layer_mats: Vec<RowMajorMatrix<Challenge>> = reduced_openings
            .into_iter()
            .map(|(log_height, (_, mut ro))| {
                assert!(log_height > 0);
                log_heights.push(log_height);
                let lambda = extract_lambda(&mut ro, self.fri_params.log_blowup);
                lambdas.push(lambda);
                // Prepare for the first-layer fold, with each leaf holding a sibling pair (2 values).
                RowMajorMatrix::new(ro, 2)
            })
            .collect();
        let log_max_height = log_heights.iter().max().copied().unwrap();

        // Commit to the reduced openings at each log_height, so we can sample a single global
        // folding challenge for all first layers, which we use for a "manual" fold (not part of
        // p3-fri). This is necessary because the first layer of folding uses different twiddles,
        // so it's easiest to do it here, before handing off to p3-fri.

        let (first_layer_commitment, first_layer_data) =
            self.fri_params.mmcs.commit(first_layer_mats);
        challenger.observe(first_layer_commitment.clone());
        let bivariate_beta: Challenge = challenger.sample_algebra_element();

        // Fold all first layers at bivariate_beta.

        let fri_input: Vec<Vec<Challenge>> = self
            .fri_params
            .mmcs
            .get_matrices(&first_layer_data)
            .into_iter()
            .map(|m| fold_y(bivariate_beta, m))
            // Reverse, because FRI expects descending by height
            .rev()
            .collect();
        let folding: CircleFriFoldingForMmcs<Val, Challenge, InputMmcs, FriMmcs> =
            CircleFriFolding(PhantomData);

        let fri_proof = prove(&folding, &self.fri_params, fri_input, challenger, |index| {
            // CircleFriFolding asks for an extra query index bit, so we use that here to index
            // the first layer fold.

            // Open the input (big opening, lots of columns) at the full index...
            let input_openings = rounds
                .iter()
                .map(|(data, _)| {
                    let log_max_batch_height = log2_strict_usize(self.mmcs.get_max_height(data));
                    let reduced_index = index >> (log_max_height - log_max_batch_height);
                    self.mmcs.open_batch(reduced_index, data)
                })
                .collect();

            // We committed to the first layer in sibling pairs, so open at the halved index and
            // include the sibling value as part of the input proof.
            let (first_layer_values, first_layer_proof) = self
                .fri_params
                .mmcs
                .open_batch(index >> 1, &first_layer_data)
                .unpack();
            let first_layer_siblings = izip!(&first_layer_values, &log_heights)
                .map(|(v, log_height)| {
                    let reduced_index = index >> (log_max_height - log_height);
                    let sibling_index = (reduced_index & 1) ^ 1;
                    v[sibling_index]
                })
                .collect();
            CircleInputProof {
                input_openings,
                first_layer_siblings,
                first_layer_proof,
            }
        });

        (
            values,
            CirclePcsProof {
                first_layer_commitment,
                lambdas,
                fri_proof,
            },
        )
    }

    fn verify(
        &self,
        // For each round:
        rounds: Vec<(
            Self::Commitment,
            // for each matrix:
            Vec<(
                // its domain,
                Self::Domain,
                // for each point:
                Vec<(
                    // the point,
                    Challenge,
                    // values at the point
                    Vec<Challenge>,
                )>,
            )>,
        )>,
        proof: &Self::Proof,
        challenger: &mut Challenger,
    ) -> Result<(), Self::Error> {
        // Write evaluations to challenger
        for (_, round) in &rounds {
            for (_, mat) in round {
                for (_, point) in mat {
                    challenger.observe_algebra_slice(point);
                }
            }
        }

        // Batch combination challenge
        let alpha: Challenge = challenger.sample_algebra_element();
        challenger.observe(proof.first_layer_commitment.clone());
        let bivariate_beta: Challenge = challenger.sample_algebra_element();

        // The +1 accounts for the bivariate first-layer fold, which happens before the FRI
        // commit phase.
        let log_global_max_height =
            proof.fri_proof.commit_phase_commits.len() + self.fri_params.log_blowup + 1;

        let folding: CircleFriFoldingForMmcs<Val, Challenge, InputMmcs, FriMmcs> =
            CircleFriFolding(PhantomData);

        verify(
            &folding,
            &self.fri_params,
            &proof.fri_proof,
            challenger,
            |index, input_proof| {
                // log_height -> (alpha_offset, ro)
                let mut reduced_openings = BTreeMap::new();

                let CircleInputProof {
                    input_openings,
                    first_layer_siblings,
                    first_layer_proof,
                } = input_proof;

                for (batch_opening, (batch_commit, mats)) in
                    zip_eq(input_openings, &rounds, InputError::InputShapeError)?
                {
                    let batch_heights: Vec<usize> = mats
                        .iter()
                        .map(|(domain, _)| domain.size() << self.fri_params.log_blowup)
                        .collect_vec();
                    let batch_dims: Vec<Dimensions> = batch_heights
                        .iter()
                        // todo: mmcs doesn't really need width
                        .map(|&height| Dimensions { width: 0, height })
                        .collect_vec();

                    let (dims, idx) = batch_heights
                        .iter()
                        .max()
                        .map(|x| log2_strict_usize(*x))
                        .map_or_else(
                            // Empty batch?
                            || (&[][..], 0),
                            |log_batch_max_height| {
                                (
                                    &batch_dims[..],
                                    index >> (log_global_max_height - log_batch_max_height),
                                )
                            },
                        );

                    self.mmcs
                        .verify_batch(batch_commit, dims, idx, batch_opening.into())
                        .map_err(InputError::InputMmcsError)?;

                    for (ps_at_x, (mat_domain, mat_points_and_values)) in zip_eq(
                        &batch_opening.opened_values,
                        mats,
                        InputError::InputShapeError,
                    )? {
                        let log_height = mat_domain.log_n + self.fri_params.log_blowup;
                        let bits_reduced = log_global_max_height - log_height;
                        let orig_idx = cfft_permute_index(index >> bits_reduced, log_height);

                        let committed_domain = CircleDomain::standard(log_height);
                        let x = committed_domain.nth_point(orig_idx);

                        let (alpha_offset, ro) = reduced_openings
                            .entry(log_height)
                            .or_insert((Challenge::ONE, Challenge::ZERO));
                        let alpha_pow_width_2 = alpha.exp_u64(ps_at_x.len() as u64).square();

                        for (zeta_uni, ps_at_zeta) in mat_points_and_values {
                            let zeta = Point::from_projective_line(*zeta_uni);

                            *ro += *alpha_offset
                                * deep_quotient_reduce_row(alpha, x, zeta, ps_at_x, ps_at_zeta);

                            *alpha_offset *= alpha_pow_width_2;
                        }
                    }
                }

                // Verify bivariate fold and lambda correction

                let (mut fri_input, fl_dims, fl_leaves): (Vec<_>, Vec<_>, Vec<_>) = zip_eq(
                    zip_eq(
                        reduced_openings,
                        first_layer_siblings,
                        InputError::InputShapeError,
                    )?,
                    &proof.lambdas,
                    InputError::InputShapeError,
                )?
                .map(|(((log_height, (_, ro)), &fl_sib), &lambda)| {
                    assert!(log_height > 0);

                    let orig_size = log_height - self.fri_params.log_blowup;
                    let bits_reduced = log_global_max_height - log_height;
                    let orig_idx = cfft_permute_index(index >> bits_reduced, log_height);

                    let lde_domain = CircleDomain::standard(log_height);
                    let p: Point<Val> = lde_domain.nth_point(orig_idx);

                    let lambda_corrected = ro - lambda * p.v_n(orig_size);

                    let mut fl_values = vec![lambda_corrected; 2];
                    fl_values[((index >> bits_reduced) & 1) ^ 1] = fl_sib;

                    let fri_input = (
                        // - 1 here is because we have already folded a layer.
                        log_height - 1,
                        fold_y_row(
                            index >> (bits_reduced + 1),
                            // - 1 here is log_arity.
                            log_height - 1,
                            bivariate_beta,
                            fl_values.iter().copied(),
                        ),
                    );

                    let fl_dims = Dimensions {
                        width: 0,
                        height: 1 << (log_height - 1),
                    };

                    (fri_input, fl_dims, fl_values)
                })
                .multiunzip();

                // Reverse so heights are in descending order, as FRI expects.
                fri_input.reverse();

                self.fri_params
                    .mmcs
                    .verify_batch(
                        &proof.first_layer_commitment,
                        &fl_dims,
                        index >> 1,
                        BatchOpeningRef::new(&fl_leaves, first_layer_proof),
                    )
                    .map_err(InputError::FirstLayerMmcsError)?;

                Ok(fri_input)
            },
        )
    }
}

#[cfg(test)]
mod tests {
    use p3_challenger::{HashChallenger, SerializingChallenger32};
    use p3_commit::ExtensionMmcs;
    use p3_field::extension::BinomialExtensionField;
    use p3_fri::create_test_fri_params;
    use p3_keccak::Keccak256Hash;
    use p3_merkle_tree::MerkleTreeMmcs;
    use p3_mersenne_31::Mersenne31;
    use p3_symmetric::{CompressionFunctionFromHasher, SerializingHasher};
    use rand::rngs::SmallRng;
    use rand::{Rng, SeedableRng};

    use super::*;

    #[test]
    fn circle_pcs() {
        // Very simple pcs test. More rigorous tests in p3_fri/tests/pcs.

        let mut rng = SmallRng::seed_from_u64(0);

        type Val = Mersenne31;
        type Challenge = BinomialExtensionField<Mersenne31, 3>;

        type ByteHash = Keccak256Hash;
        type FieldHash = SerializingHasher<ByteHash>;
        let byte_hash = ByteHash {};
        let field_hash = FieldHash::new(byte_hash);

        type MyCompress = CompressionFunctionFromHasher<ByteHash, 2, 32>;
        let compress = MyCompress::new(byte_hash);

        type ValMmcs = MerkleTreeMmcs<Val, u8, FieldHash, MyCompress, 32>;
        let val_mmcs = ValMmcs::new(field_hash, compress);

        type ChallengeMmcs = ExtensionMmcs<Val, Challenge, ValMmcs>;
        let challenge_mmcs = ChallengeMmcs::new(val_mmcs.clone());

        type Challenger = SerializingChallenger32<Val, HashChallenger<u8, ByteHash, 32>>;

        let fri_params = create_test_fri_params(challenge_mmcs, 0);

        type Pcs = CirclePcs<Val, ValMmcs, ChallengeMmcs>;
        let pcs = Pcs {
            mmcs: val_mmcs,
            fri_params,
            _phantom: PhantomData,
        };

        let log_n = 10;

        let d = <Pcs as p3_commit::Pcs<Challenge, Challenger>>::natural_domain_for_degree(
            &pcs,
            1 << log_n,
        );

        let evals = RowMajorMatrix::rand(&mut rng, 1 << log_n, 1);

        let (comm, data) =
            <Pcs as p3_commit::Pcs<Challenge, Challenger>>::commit(&pcs, [(d, evals)]);

        let zeta: Challenge = rng.random();

        let mut chal = Challenger::from_hasher(vec![], byte_hash);
        let (values, proof) = pcs.open(vec![(&data, vec![vec![zeta]])], &mut chal);

        let mut chal = Challenger::from_hasher(vec![], byte_hash);
        pcs.verify(
            vec![(comm, vec![(d, vec![(zeta, values[0][0][0].clone())])])],
            &proof,
            &mut chal,
        )
        .expect("verify err");
    }
}