// nova_snark/spartan/ppsnark.rs
1//! This module implements `RelaxedR1CSSNARK` traits using a spark-based approach to prove evaluations of
2//! sparse multilinear polynomials involved in Spartan's sum-check protocol, thereby providing a preprocessing SNARK
3//! The verifier in this preprocessing SNARK maintains a commitment to R1CS matrices. This is beneficial when using a
4//! polynomial commitment scheme in which the verifier's costs is succinct.
5//! The SNARK implemented here is described in the MicroNova paper.
6use crate::traits::evm_serde::EvmCompatSerde;
7use crate::{
8  digest::{DigestComputer, SimpleDigestible},
9  errors::NovaError,
10  r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness},
11  spartan::{
12    batch_invert,
13    math::Math,
14    polys::{
15      eq::EqPolynomial,
16      identity::IdentityPolynomial,
17      masked_eq::MaskedEqPolynomial,
18      multilinear::{MultilinearPolynomial, SparsePolynomial},
19      univariate::{CompressedUniPoly, UniPoly},
20    },
21    powers,
22    sumcheck::{eq_sumcheck::EqSumCheckInstance, SumcheckEngine, SumcheckProof},
23    PolyEvalInstance, PolyEvalWitness,
24  },
25  traits::{
26    commitment::{CommitmentEngineTrait, Len},
27    evaluation::EvaluationEngineTrait,
28    snark::{DigestHelperTrait, RelaxedR1CSSNARKTrait},
29    Engine, TranscriptEngineTrait, TranscriptReprTrait,
30  },
31  zip_with, Commitment, CommitmentKey,
32};
33use core::cmp::max;
34use ff::Field;
35use itertools::Itertools as _;
36use once_cell::sync::OnceCell;
37use rayon::prelude::*;
38use serde::{Deserialize, Serialize};
39use serde_with::serde_as;
40
41fn padded<E: Engine>(v: &[E::Scalar], n: usize, e: &E::Scalar) -> Vec<E::Scalar> {
42  let mut v_padded = vec![*e; n];
43  for (i, v_i) in v.iter().enumerate() {
44    v_padded[i] = *v_i;
45  }
46  v_padded
47}
48
/// A type that holds `R1CSShape` in a form amenable to memory checking
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct R1CSShapeSparkRepr<E: Engine> {
  /// size of the vectors below; a power of two (see `R1CSShapeSparkRepr::new`)
  pub N: usize,

  /// dense representation of row indices; entries past the non-zero terms are 0
  pub row: Vec<E::Scalar>,
  /// dense representation of column indices; entries past the non-zero terms are N-1
  pub col: Vec<E::Scalar>,
  /// values from A matrix, zero-padded; laid out first in the concatenated (A, B, C) order
  pub val_A: Vec<E::Scalar>,
  /// values from B matrix, zero-padded and offset by `A.len()` in the layout
  pub val_B: Vec<E::Scalar>,
  /// values from C matrix, zero-padded and offset by `A.len() + B.len()` in the layout
  pub val_C: Vec<E::Scalar>,

  /// timestamp polynomial for rows: per-address read counts of `row`
  pub ts_row: Vec<E::Scalar>,
  /// timestamp polynomial for columns: per-address read counts of `col`
  pub ts_col: Vec<E::Scalar>,
}
72
/// A type that holds a commitment to a sparse polynomial
///
/// Holds one commitment per dense vector of [`R1CSShapeSparkRepr`],
/// produced by `R1CSShapeSparkRepr::commit`.
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct R1CSShapeSparkCommitment<E: Engine> {
  /// size of each committed vector
  pub N: usize,

  // commitments to the dense representation
  /// commitment to row indices
  pub comm_row: Commitment<E>,
  /// commitment to column indices
  pub comm_col: Commitment<E>,
  /// commitment to A matrix values
  pub comm_val_A: Commitment<E>,
  /// commitment to B matrix values
  pub comm_val_B: Commitment<E>,
  /// commitment to C matrix values
  pub comm_val_C: Commitment<E>,

  // commitments to the timestamp polynomials
  /// commitment to row timestamps
  pub comm_ts_row: Commitment<E>,
  /// commitment to column timestamps
  pub comm_ts_col: Commitment<E>,
}
98
99impl<E: Engine> TranscriptReprTrait<E::GE> for R1CSShapeSparkCommitment<E> {
100  fn to_transcript_bytes(&self) -> Vec<u8> {
101    [
102      self.comm_row,
103      self.comm_col,
104      self.comm_val_A,
105      self.comm_val_B,
106      self.comm_val_C,
107      self.comm_ts_row,
108      self.comm_ts_col,
109    ]
110    .as_slice()
111    .to_transcript_bytes()
112  }
113}
114
impl<E: Engine> R1CSShapeSparkRepr<E> {
  /// represents `R1CSShape` in a Spark-friendly format amenable to memory checking
  pub fn new(S: &R1CSShape<E>) -> R1CSShapeSparkRepr<E> {
    // N is the padded size: a power of two at least as large as (a) the total
    // number of non-zero entries of A, B, C laid out back-to-back, (b) the
    // length of z (2 * num_vars), and (c) the number of constraints.
    let N = {
      let total_nz = S.A.len() + S.B.len() + S.C.len();
      max(total_nz, max(2 * S.num_vars, S.num_cons)).next_power_of_two()
    };

    let (mut row, mut col) = (vec![0; N], vec![N - 1; N]); // we make col lookup into the last entry of z, so we commit to zeros

    // record the (row, col) addresses of the non-zero entries of A, B, C,
    // concatenated in that order
    for (i, (r, c, _)) in S.A.iter().chain(S.B.iter()).chain(S.C.iter()).enumerate() {
      row[i] = r;
      col[i] = c;
    }
    // A's values occupy the first S.A.len() slots; the rest stay zero
    let val_A = {
      let mut val = vec![E::Scalar::ZERO; N];
      for (i, (_, _, v)) in S.A.iter().enumerate() {
        val[i] = v;
      }
      val
    };

    // B's values are offset by S.A.len(), matching the concatenated layout above
    let val_B = {
      let mut val = vec![E::Scalar::ZERO; N];
      for (i, (_, _, v)) in S.B.iter().enumerate() {
        val[S.A.len() + i] = v;
      }
      val
    };

    // C's values are offset by S.A.len() + S.B.len()
    let val_C = {
      let mut val = vec![E::Scalar::ZERO; N];
      for (i, (_, _, v)) in S.C.iter().enumerate() {
        val[S.A.len() + S.B.len() + i] = v;
      }
      val
    };

    // timestamp calculation routine: ts[a] counts how many times address `a`
    // appears in the address trace (i.e., the number of reads of cell `a`)
    let timestamp_calc = |num_ops: usize, num_cells: usize, addr_trace: &[usize]| -> Vec<usize> {
      let mut ts = vec![0usize; num_cells];

      assert!(num_ops >= addr_trace.len());
      for addr in addr_trace {
        assert!(*addr < num_cells);
        ts[*addr] += 1;
      }
      ts
    };

    // timestamp polynomials for row and col, computed in parallel
    let (ts_row, ts_col) =
      rayon::join(|| timestamp_calc(N, N, &row), || timestamp_calc(N, N, &col));

    // a routine to turn a vector of usize into a vector scalars
    let to_vec_scalar = |v: &[usize]| -> Vec<E::Scalar> {
      (0..v.len())
        .map(|i| E::Scalar::from(v[i] as u64))
        .collect::<Vec<E::Scalar>>()
    };

    R1CSShapeSparkRepr {
      N,

      // dense representation
      row: to_vec_scalar(&row),
      col: to_vec_scalar(&col),
      val_A,
      val_B,
      val_C,

      // timestamp polynomials
      ts_row: to_vec_scalar(&ts_row),
      ts_col: to_vec_scalar(&ts_col),
    }
  }

  /// Commits to the R1CSShapeSparkRepr using the provided commitment key
  ///
  /// Produces one commitment per dense vector, all with zero blinds, in the
  /// fixed field order of `R1CSShapeSparkCommitment`.
  pub fn commit(&self, ck: &CommitmentKey<E>) -> R1CSShapeSparkCommitment<E> {
    let comm_vec: Vec<Commitment<E>> = [
      &self.row,
      &self.col,
      &self.val_A,
      &self.val_B,
      &self.val_C,
      &self.ts_row,
      &self.ts_col,
    ]
    .par_iter()
    .map(|v| E::CE::commit(ck, v, &E::Scalar::ZERO))
    .collect();

    R1CSShapeSparkCommitment {
      N: self.row.len(),
      comm_row: comm_vec[0],
      comm_col: comm_vec[1],
      comm_val_A: comm_vec[2],
      comm_val_B: comm_vec[3],
      comm_val_C: comm_vec[4],
      comm_ts_row: comm_vec[5],
      comm_ts_col: comm_vec[6],
    }
  }

  // computes evaluation oracles:
  //   mem_row: evaluations of eq(r_outer, .) over the hypercube (the "row memory")
  //   mem_col: z zero-padded to length N (the "col memory")
  //   L_row[i] = mem_row[row[i]], L_col[i] = mem_col[col[i]] — the lookups of
  //   the two memories at the committed addresses
  fn evaluation_oracles(
    &self,
    S: &R1CSShape<E>,
    r_outer: &[E::Scalar],
    z: &[E::Scalar],
  ) -> (
    Vec<E::Scalar>,
    Vec<E::Scalar>,
    Vec<E::Scalar>,
    Vec<E::Scalar>,
  ) {
    let mem_row = EqPolynomial::new(r_outer.to_vec()).evals();
    let mem_col = padded::<E>(z, self.N, &E::Scalar::ZERO);

    let (L_row, L_col) = {
      let mut L_row = vec![mem_row[0]; self.N]; // we place mem_row[0] since resized row is appended with 0s
      let mut L_col = vec![mem_col[self.N - 1]; self.N]; // we place mem_col[N-1] since resized col is appended with N-1

      // overwrite the leading entries with the actual lookups for the
      // concatenated (A, B, C) non-zero entries
      for (i, (val_r, val_c)) in S
        .A
        .iter()
        .chain(S.B.iter())
        .chain(S.C.iter())
        .map(|(r, c, _)| (mem_row[r], mem_col[c]))
        .enumerate()
      {
        L_row[i] = val_r;
        L_col[i] = val_c;
      }
      (L_row, L_col)
    };

    (mem_row, mem_col, L_row, L_col)
  }
}
255
/// The [WitnessBoundSumcheck] ensures that the witness polynomial W defined over n = log(N) variables,
/// is zero outside of the first `num_vars = 2^m` entries.
///
/// # Details
///
/// The `W` polynomial is padded with zeros to size N = 2^n.
/// The `masked_eq` polynomial is defined with regards to a random challenge `tau` as
/// the eq(tau) polynomial, with the first 2^m evaluations set to 0.
///
/// The instance is given by
///  `0 = ∑_{0≤i<2^n} masked_eq[i] * W[i]`.
/// It is equivalent to the expression
///  `0 = ∑_{2^m≤i<2^n} eq[i] * W[i]`
/// Since `eq` is random, the instance is only satisfied if `W[2^{m}..] = 0`.
pub struct WitnessBoundSumcheck<E: Engine> {
  // witness polynomial W, zero-padded to the full hypercube size
  poly_W: MultilinearPolynomial<E::Scalar>,
  // eq(tau) with the first 2^m evaluations masked to zero
  poly_masked_eq: MultilinearPolynomial<E::Scalar>,
}
274
275impl<E: Engine> WitnessBoundSumcheck<E> {
276  /// Creates a new WitnessBoundSumcheck instance
277  pub fn new(tau: Vec<E::Scalar>, poly_W_padded: Vec<E::Scalar>, num_vars: usize) -> Self {
278    let num_vars_log = num_vars.log_2();
279    // When num_vars = num_rounds, we shouldn't have to prove anything
280    // but we still want this instance to compute the evaluation of W
281    let num_rounds = poly_W_padded.len().log_2();
282    assert!(num_vars_log < num_rounds);
283
284    let poly_masked_eq_evals =
285      MaskedEqPolynomial::new(&EqPolynomial::new(tau), num_vars_log).evals();
286
287    Self {
288      poly_W: MultilinearPolynomial::new(poly_W_padded),
289      poly_masked_eq: MultilinearPolynomial::new(poly_masked_eq_evals),
290    }
291  }
292}
293impl<E: Engine> SumcheckEngine<E> for WitnessBoundSumcheck<E> {
294  fn initial_claims(&self) -> Vec<E::Scalar> {
295    vec![E::Scalar::ZERO]
296  }
297
298  fn degree(&self) -> usize {
299    3
300  }
301
302  fn size(&self) -> usize {
303    assert_eq!(self.poly_W.len(), self.poly_masked_eq.len());
304    self.poly_W.len()
305  }
306
307  fn evaluation_points(&mut self) -> Vec<Vec<E::Scalar>> {
308    // masked_eq * W is a quadratic polynomial (A * B)
309    let (eval_point_0, eval_point_inf) =
310      SumcheckProof::<E>::compute_eval_points_quadratic(&self.poly_masked_eq, &self.poly_W);
311
312    // bound_coeff is always 0 for quadratic polynomials
313    vec![vec![eval_point_0, E::Scalar::ZERO, eval_point_inf]]
314  }
315
316  fn bound(&mut self, r: &E::Scalar) {
317    [&mut self.poly_W, &mut self.poly_masked_eq]
318      .par_iter_mut()
319      .for_each(|poly| poly.bind_poly_var_top(r));
320  }
321
322  fn final_claims(&self) -> Vec<Vec<E::Scalar>> {
323    vec![vec![self.poly_W[0], self.poly_masked_eq[0]]]
324  }
325}
326
/// Memory sumcheck instance for PPSNARK LogUp
///
/// Oracles are produced by [`MemorySumcheckInstance::compute_oracles`]; see its
/// docs for the definitions of T, W, and TS.
pub struct MemorySumcheckInstance<E: Engine> {
  // row
  // W_row[i] + r
  w_plus_r_row: MultilinearPolynomial<E::Scalar>,
  // T_row[i] + r
  t_plus_r_row: MultilinearPolynomial<E::Scalar>,
  // TS_row[i] / (T_row[i] + r)
  t_plus_r_inv_row: MultilinearPolynomial<E::Scalar>,
  // 1 / (W_row[i] + r)
  w_plus_r_inv_row: MultilinearPolynomial<E::Scalar>,
  // read counts for the row memory
  ts_row: MultilinearPolynomial<E::Scalar>,

  // col (same roles as the row oracles, for the column memory)
  w_plus_r_col: MultilinearPolynomial<E::Scalar>,
  t_plus_r_col: MultilinearPolynomial<E::Scalar>,
  t_plus_r_inv_col: MultilinearPolynomial<E::Scalar>,
  w_plus_r_inv_col: MultilinearPolynomial<E::Scalar>,
  ts_col: MultilinearPolynomial<E::Scalar>,

  // factored eq polynomial weighting the well-formedness claims
  eq_sumcheck: EqSumCheckInstance<E>,

  // Per-claim running claims and saved evaluation points (BDDT, eprint 2025/1117 Section 6.2)
  running_claims: [E::Scalar; 6],
  saved_evals: [[E::Scalar; 3]; 6],
}
349
impl<E: Engine> MemorySumcheckInstance<E> {
  /// Computes witnesses for MemoryInstanceSumcheck
  ///
  /// # Description
  /// We use the logUp protocol to prove that
  /// sum TS\[i\]/(T\[i\] + r) - 1/(W\[i\] + r) = 0
  /// where
  ///   T_row\[i\] = mem_row\[i\]      * gamma + i
  ///            = eq(tau)\[i\]      * gamma + i
  ///   W_row\[i\] = L_row\[i\]        * gamma + addr_row\[i\]
  ///            = eq(tau)\[row\[i\]\] * gamma + addr_row\[i\]
  ///   T_col\[i\] = mem_col\[i\]      * gamma + i
  ///            = z\[i\]            * gamma + i
  ///   W_col\[i\] = L_col\[i\]     * gamma + addr_col\[i\]
  ///            = z\[col\[i\]\]       * gamma + addr_col\[i\]
  /// and
  ///   TS_row, TS_col are integer-valued vectors representing the number of reads
  ///   to each memory cell of L_row, L_col
  ///
  /// The function returns oracles for the polynomials TS\[i\]/(T\[i\] + r), 1/(W\[i\] + r),
  /// as well as auxiliary polynomials T\[i\] + r, W\[i\] + r
  ///
  /// All three returned arrays are ordered (row-T, row-W, col-T, col-W).
  pub fn compute_oracles(
    ck: &CommitmentKey<E>,
    r: &E::Scalar,
    gamma: &E::Scalar,
    mem_row: &[E::Scalar],
    addr_row: &[E::Scalar],
    L_row: &[E::Scalar],
    ts_row: &[E::Scalar],
    mem_col: &[E::Scalar],
    addr_col: &[E::Scalar],
    L_col: &[E::Scalar],
    ts_col: &[E::Scalar],
  ) -> Result<([Commitment<E>; 4], [Vec<E::Scalar>; 4], [Vec<E::Scalar>; 4]), NovaError> {
    // hash the tuples of (addr,val) memory contents and read responses into a single field element using `hash_func`
    let hash_func_vec = |mem: &[E::Scalar],
                         addr: &[E::Scalar],
                         lookups: &[E::Scalar]|
     -> (Vec<E::Scalar>, Vec<E::Scalar>) {
      // hash an (addr, val) pair as val * gamma + addr
      let hash_func = |addr: &E::Scalar, val: &E::Scalar| -> E::Scalar { *val * gamma + *addr };
      assert_eq!(addr.len(), lookups.len());
      rayon::join(
        || {
          // T: memory contents hashed at their own index
          (0..mem.len())
            .map(|i| hash_func(&E::Scalar::from(i as u64), &mem[i]))
            .collect::<Vec<E::Scalar>>()
        },
        || {
          // W: read responses hashed at the committed addresses
          (0..addr.len())
            .map(|i| hash_func(&addr[i], &lookups[i]))
            .collect::<Vec<E::Scalar>>()
        },
      )
    };

    let ((T_row, W_row), (T_col, W_col)) = rayon::join(
      || hash_func_vec(mem_row, addr_row, L_row),
      || hash_func_vec(mem_col, addr_col, L_col),
    );

    // compute vectors TS[i]/(T[i] + r) and 1/(W[i] + r)
    let helper = |T: &[E::Scalar],
                  W: &[E::Scalar],
                  TS: &[E::Scalar],
                  r: &E::Scalar|
     -> Result<
      (
        Vec<E::Scalar>,
        Vec<E::Scalar>,
        Vec<E::Scalar>,
        Vec<E::Scalar>,
      ),
      NovaError,
    > {
      // concatenate T + r and W + r so a single batch inversion suffices
      let t_plus_r_and_w_plus_r = T
        .par_iter()
        .chain(W.par_iter())
        .map(|e| *e + *r)
        .collect::<Vec<E::Scalar>>();

      let inv = batch_invert(&t_plus_r_and_w_plus_r)?;

      // split the concatenated vector and its inverses back into T and W halves
      let mut t_plus_r = t_plus_r_and_w_plus_r;
      let w_plus_r = t_plus_r.split_off(T.len());

      let mut t_plus_r_inv = inv;
      let w_plus_r_inv = t_plus_r_inv.split_off(T.len());

      // compute inv[i] * TS[i] in parallel
      t_plus_r_inv = zip_with!((t_plus_r_inv.into_par_iter(), TS.par_iter()), |e1, e2| e1
        * *e2)
      .collect::<Vec<_>>();

      Ok((t_plus_r_inv, w_plus_r_inv, t_plus_r, w_plus_r))
    };

    let (row, col) = rayon::join(
      || helper(&T_row, &W_row, ts_row, r),
      || helper(&T_col, &W_col, ts_col, r),
    );

    let (t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_row, w_plus_r_row) = row?;
    let (t_plus_r_inv_col, w_plus_r_inv_col, t_plus_r_col, w_plus_r_col) = col?;

    // commit to the four inverse oracles (zero blinds), in parallel
    let (
      (comm_t_plus_r_inv_row, comm_w_plus_r_inv_row),
      (comm_t_plus_r_inv_col, comm_w_plus_r_inv_col),
    ) = rayon::join(
      || {
        rayon::join(
          || E::CE::commit(ck, &t_plus_r_inv_row, &E::Scalar::ZERO),
          || E::CE::commit(ck, &w_plus_r_inv_row, &E::Scalar::ZERO),
        )
      },
      || {
        rayon::join(
          || E::CE::commit(ck, &t_plus_r_inv_col, &E::Scalar::ZERO),
          || E::CE::commit(ck, &w_plus_r_inv_col, &E::Scalar::ZERO),
        )
      },
    );

    let comm_vec = [
      comm_t_plus_r_inv_row,
      comm_w_plus_r_inv_row,
      comm_t_plus_r_inv_col,
      comm_w_plus_r_inv_col,
    ];

    let poly_vec = [
      t_plus_r_inv_row,
      w_plus_r_inv_row,
      t_plus_r_inv_col,
      w_plus_r_inv_col,
    ];

    let aux_poly_vec = [t_plus_r_row, w_plus_r_row, t_plus_r_col, w_plus_r_col];

    Ok((comm_vec, poly_vec, aux_poly_vec))
  }

  /// Create a new memory sumcheck instance
  ///
  /// `polys_oracle` and `polys_aux` are the outputs of [`Self::compute_oracles`],
  /// both ordered (row-T, row-W, col-T, col-W); `rhos` seeds the factored eq
  /// polynomial used by the well-formedness claims.
  pub fn new(
    polys_oracle: [Vec<E::Scalar>; 4],
    polys_aux: [Vec<E::Scalar>; 4],
    rhos: Vec<E::Scalar>,
    ts_row: Vec<E::Scalar>,
    ts_col: Vec<E::Scalar>,
  ) -> Self {
    let [t_plus_r_inv_row, w_plus_r_inv_row, t_plus_r_inv_col, w_plus_r_inv_col] = polys_oracle;
    let [t_plus_r_row, w_plus_r_row, t_plus_r_col, w_plus_r_col] = polys_aux;

    Self {
      w_plus_r_row: MultilinearPolynomial::new(w_plus_r_row),
      t_plus_r_row: MultilinearPolynomial::new(t_plus_r_row),
      t_plus_r_inv_row: MultilinearPolynomial::new(t_plus_r_inv_row),
      w_plus_r_inv_row: MultilinearPolynomial::new(w_plus_r_inv_row),
      ts_row: MultilinearPolynomial::new(ts_row),
      w_plus_r_col: MultilinearPolynomial::new(w_plus_r_col),
      t_plus_r_col: MultilinearPolynomial::new(t_plus_r_col),
      t_plus_r_inv_col: MultilinearPolynomial::new(t_plus_r_inv_col),
      w_plus_r_inv_col: MultilinearPolynomial::new(w_plus_r_inv_col),
      ts_col: MultilinearPolynomial::new(ts_col),
      eq_sumcheck: EqSumCheckInstance::new(rhos),
      // running claims start at zero and are advanced each round in `bound`
      running_claims: [E::Scalar::ZERO; 6],
      saved_evals: [[E::Scalar::ZERO; 3]; 6],
    }
  }
}
519
impl<E: Engine> SumcheckEngine<E> for MemorySumcheckInstance<E> {
  fn initial_claims(&self) -> Vec<E::Scalar> {
    // six sub-claims, all zero: two logUp sums (row, col) and four
    // eq-weighted well-formedness identities for the inverse oracles
    vec![E::Scalar::ZERO; 6]
  }

  fn degree(&self) -> usize {
    3
  }

  fn size(&self) -> usize {
    // sanity checks: every polynomial must be defined over the same hypercube
    assert_eq!(self.w_plus_r_row.len(), self.t_plus_r_row.len());
    assert_eq!(self.w_plus_r_row.len(), self.ts_row.len());
    assert_eq!(self.w_plus_r_row.len(), self.w_plus_r_col.len());
    assert_eq!(self.w_plus_r_row.len(), self.t_plus_r_col.len());
    assert_eq!(self.w_plus_r_row.len(), self.ts_col.len());

    self.w_plus_r_row.len()
  }

  fn evaluation_points(&mut self) -> Vec<Vec<E::Scalar>> {
    // Pre-borrow all fields as shared references for parallel access
    let eq = &self.eq_sumcheck;
    let running_claims = &self.running_claims;
    let t_plus_r_inv_row = &self.t_plus_r_inv_row;
    let w_plus_r_inv_row = &self.w_plus_r_inv_row;
    let t_plus_r_row = &self.t_plus_r_row;
    let w_plus_r_row = &self.w_plus_r_row;
    let ts_row = &self.ts_row;
    let t_plus_r_inv_col = &self.t_plus_r_inv_col;
    let w_plus_r_inv_col = &self.w_plus_r_inv_col;
    let t_plus_r_col = &self.t_plus_r_col;
    let w_plus_r_col = &self.w_plus_r_col;
    let ts_col = &self.ts_col;

    // inv related evaluation points for linear (A - B) pattern (no claim derivation)
    // 0 = sum TS[i]/(T[i] + r) - 1/(W[i] + r)
    let (
      ((eval_inv_0_row, eval_inv_3_row), (eval_inv_0_col, eval_inv_3_col)),
      (
        ((eval_T_0_row, eval_T_2_row, eval_T_3_row), (eval_W_0_row, eval_W_2_row, eval_W_3_row)),
        ((eval_T_0_col, eval_T_2_col, eval_T_3_col), (eval_W_0_col, eval_W_2_col, eval_W_3_col)),
      ),
    ) = rayon::join(
      || {
        rayon::join(
          || SumcheckProof::<E>::compute_eval_points_linear(t_plus_r_inv_row, w_plus_r_inv_row),
          || SumcheckProof::<E>::compute_eval_points_linear(t_plus_r_inv_col, w_plus_r_inv_col),
        )
      },
      || {
        rayon::join(
          || {
            // Row evaluation points (claim-derived, BDDT Section 6.2)
            rayon::join(
              || {
                // 0 = sum eq[i] * (inv_T[i] * (T[i] + r) - TS[i]))
                eq.evaluation_points_cubic_with_three_inputs(
                  t_plus_r_inv_row,
                  t_plus_r_row,
                  ts_row,
                  running_claims[2],
                )
              },
              || {
                // 0 = sum eq[i] * (inv_W[i] * (W[i] + r) - 1))
                eq.evaluation_points_cubic_with_two_inputs(
                  w_plus_r_inv_row,
                  w_plus_r_row,
                  running_claims[3],
                )
              },
            )
          },
          || {
            // Column evaluation points (claim-derived, BDDT Section 6.2);
            // same identities as the row case, against the column oracles
            rayon::join(
              || {
                eq.evaluation_points_cubic_with_three_inputs(
                  t_plus_r_inv_col,
                  t_plus_r_col,
                  ts_col,
                  running_claims[4],
                )
              },
              || {
                eq.evaluation_points_cubic_with_two_inputs(
                  w_plus_r_inv_col,
                  w_plus_r_col,
                  running_claims[5],
                )
              },
            )
          },
        )
      },
    );

    // Save evaluation points for running claim updates in bound();
    // indices here must line up with the claim order in initial_claims()
    self.saved_evals = [
      [eval_inv_0_row, E::Scalar::ZERO, eval_inv_3_row],
      [eval_inv_0_col, E::Scalar::ZERO, eval_inv_3_col],
      [eval_T_0_row, eval_T_2_row, eval_T_3_row],
      [eval_W_0_row, eval_W_2_row, eval_W_3_row],
      [eval_T_0_col, eval_T_2_col, eval_T_3_col],
      [eval_W_0_col, eval_W_2_col, eval_W_3_col],
    ];

    self.saved_evals.iter().map(|e| e.to_vec()).collect()
  }

  fn bound(&mut self, r: &E::Scalar) {
    // advance the per-claim running claims using the evaluation points saved
    // by evaluation_points() before binding (BDDT, eprint 2025/1117 Section 6.2)
    for j in 0..6 {
      self.running_claims[j] =
        SumcheckProof::<E>::update_claim(self.running_claims[j], &self.saved_evals[j], r);
    }

    // bind the top variable of every polynomial to the challenge, in parallel
    [
      &mut self.t_plus_r_row,
      &mut self.t_plus_r_inv_row,
      &mut self.w_plus_r_row,
      &mut self.w_plus_r_inv_row,
      &mut self.ts_row,
      &mut self.t_plus_r_col,
      &mut self.t_plus_r_inv_col,
      &mut self.w_plus_r_col,
      &mut self.w_plus_r_inv_col,
      &mut self.ts_col,
    ]
    .par_iter_mut()
    .for_each(|poly| poly.bind_poly_var_top(r));

    self.eq_sumcheck.bound(r);
  }

  fn final_claims(&self) -> Vec<Vec<E::Scalar>> {
    // fully-bound evaluations of the row oracles, then the column oracles
    let poly_row_final = vec![
      self.t_plus_r_inv_row[0],
      self.w_plus_r_inv_row[0],
      self.ts_row[0],
    ];

    let poly_col_final = vec![
      self.t_plus_r_inv_col[0],
      self.w_plus_r_inv_col[0],
      self.ts_col[0],
    ];

    vec![poly_row_final, poly_col_final]
  }
}
671
/// Inner batched sumcheck instance for PPSNARK
///
/// Proves two claims:
/// 1. ABC claim: `claim = Σ L_row(y) * L_col(y) * val(y)` where val = val_A + c*val_B + c²*val_C
/// 2. E claim: `claim_E = Σ eq(r_outer, y) * E(y)` to rerandomize the E opening point
pub struct InnerBatchedSumcheckInstance<E: Engine> {
  /// The claim value for ABC: eval_Az + c * eval_Bz + c² * eval_Cz
  pub claim: E::Scalar,
  /// Row lookup polynomial
  pub poly_L_row: MultilinearPolynomial<E::Scalar>,
  /// Column lookup polynomial
  pub poly_L_col: MultilinearPolynomial<E::Scalar>,
  /// Value polynomial (val_A + c*val_B + c²*val_C)
  pub poly_val: MultilinearPolynomial<E::Scalar>,

  /// The claim value for E: eval_E_at_r_outer
  pub claim_E: E::Scalar,
  /// Factored eq polynomial eq(r_outer, .) (Gruen, eprint 2024/108)
  eq_sumcheck: EqSumCheckInstance<E>,
  /// Error polynomial E
  pub poly_E: MultilinearPolynomial<E::Scalar>,

  /// Running claim for E sub-claim, advanced each round in `bound`
  /// (BDDT, eprint 2025/1117 Section 6.2)
  running_claim_E: E::Scalar,
  /// Saved eval points for E sub-claim from the latest `evaluation_points` call
  saved_evals_E: [E::Scalar; 3],
}
699
700impl<E: Engine> InnerBatchedSumcheckInstance<E> {
701  /// Create a new inner batched sumcheck instance
702  pub fn new(
703    claim: E::Scalar,
704    L_row: Vec<E::Scalar>,
705    L_col: Vec<E::Scalar>,
706    val: Vec<E::Scalar>,
707    claim_E: E::Scalar,
708    r_outer: Vec<E::Scalar>,
709    E: Vec<E::Scalar>,
710  ) -> Self {
711    Self {
712      claim,
713      poly_L_row: MultilinearPolynomial::new(L_row),
714      poly_L_col: MultilinearPolynomial::new(L_col),
715      poly_val: MultilinearPolynomial::new(val),
716      claim_E,
717      eq_sumcheck: EqSumCheckInstance::new(r_outer),
718      poly_E: MultilinearPolynomial::new(E),
719      running_claim_E: claim_E,
720      saved_evals_E: [E::Scalar::ZERO; 3],
721    }
722  }
723}
724
725impl<E: Engine> SumcheckEngine<E> for InnerBatchedSumcheckInstance<E> {
726  fn initial_claims(&self) -> Vec<E::Scalar> {
727    vec![self.claim, self.claim_E]
728  }
729
730  fn degree(&self) -> usize {
731    3
732  }
733
734  fn size(&self) -> usize {
735    assert_eq!(self.poly_L_row.len(), self.poly_val.len());
736    assert_eq!(self.poly_L_row.len(), self.poly_L_col.len());
737    assert_eq!(self.poly_L_row.len(), self.poly_E.len());
738    self.poly_L_row.len()
739  }
740
741  fn evaluation_points(&mut self) -> Vec<Vec<E::Scalar>> {
742    let (poly_A, poly_B, poly_C) = (&self.poly_L_row, &self.poly_L_col, &self.poly_val);
743
744    // E claim: 1 N-scaling sum + claim-derived (BDDT, eprint 2025/1117 Section 6.2)
745    let ((eval_point_0, bound_coeff, eval_point_inf), (eval_E_0, eval_E_bound_coeff, eval_E_inf)) =
746      rayon::join(
747        || SumcheckProof::<E>::compute_eval_points_cubic(poly_A, poly_B, poly_C),
748        || {
749          self
750            .eq_sumcheck
751            .evaluation_points_quadratic_with_one_input(&self.poly_E, self.running_claim_E)
752        },
753      );
754
755    self.saved_evals_E = [eval_E_0, eval_E_bound_coeff, eval_E_inf];
756
757    vec![
758      vec![eval_point_0, bound_coeff, eval_point_inf],
759      // E claim is degree-2 (quadratic), so cubic coefficient is zero
760      vec![eval_E_0, E::Scalar::ZERO, eval_E_inf],
761    ]
762  }
763
764  fn bound(&mut self, r: &E::Scalar) {
765    self.running_claim_E =
766      SumcheckProof::<E>::update_claim(self.running_claim_E, &self.saved_evals_E, r);
767
768    [
769      &mut self.poly_L_row,
770      &mut self.poly_L_col,
771      &mut self.poly_val,
772      &mut self.poly_E,
773    ]
774    .par_iter_mut()
775    .for_each(|poly| poly.bind_poly_var_top(r));
776
777    self.eq_sumcheck.bound(r);
778  }
779
780  fn final_claims(&self) -> Vec<Vec<E::Scalar>> {
781    vec![
782      vec![self.poly_L_row[0], self.poly_L_col[0]],
783      vec![self.poly_E[0]],
784    ]
785  }
786}
787
/// A type that represents the prover's key
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct ProverKey<E: Engine, EE: EvaluationEngineTrait<E>> {
  // prover key of the underlying polynomial evaluation engine
  pk_ee: EE::ProverKey,
  // Spark representation of the R1CS shape (dense row/col/val/ts vectors)
  S_repr: R1CSShapeSparkRepr<E>,
  // commitments to the vectors in `S_repr`
  S_comm: R1CSShapeSparkCommitment<E>,
  vk_digest: E::Scalar, // digest of verifier's key
}
797
/// A type that represents the verifier's key
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct VerifierKey<E: Engine, EE: EvaluationEngineTrait<E>> {
  // number of constraints in the R1CS shape
  num_cons: usize,
  // number of variables in the R1CS shape
  num_vars: usize,
  // verifier key of the underlying polynomial evaluation engine
  vk_ee: EE::VerifierKey,
  // commitments to the Spark representation of the R1CS matrices
  S_comm: R1CSShapeSparkCommitment<E>,
  // lazily-initialized digest cache; skipped during serialization and
  // reset to an empty cell on deserialization
  #[serde(skip, default = "OnceCell::new")]
  digest: OnceCell<E::Scalar>,
}
809
// marker impl: allows a digest of the verifier's key to be computed via `DigestComputer`
impl<E: Engine, EE: EvaluationEngineTrait<E>> SimpleDigestible for VerifierKey<E, EE> {}
811
/// A succinct proof of knowledge of a witness to a relaxed R1CS instance
/// The proof is produced using Spartan's combination of the sum-check and
/// the commitment to a vector viewed as a polynomial commitment
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct RelaxedR1CSSNARK<E: Engine, EE: EvaluationEngineTrait<E>> {
  // commitment to oracles for memory reads (the L_row, L_col lookup vectors)
  comm_L_row: Commitment<E>,
  comm_L_col: Commitment<E>,

  // commitments to the inverse oracles that aid the logUp memory checks
  comm_t_plus_r_inv_row: Commitment<E>,
  comm_w_plus_r_inv_row: Commitment<E>,
  comm_t_plus_r_inv_col: Commitment<E>,
  comm_w_plus_r_inv_col: Commitment<E>,

  // outer sum-check proof
  sc_outer: SumcheckProof<E>,

  // claims about Az, Bz, and Cz virtual polynomials at the end of sum-check
  #[serde_as(as = "EvmCompatSerde")]
  eval_Az_at_r_outer: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_Bz_at_r_outer: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_Cz_at_r_outer: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_E_at_r_outer: E::Scalar,

  // inner batched sum-check proof
  sc_inner_batched: SumcheckProof<E>,

  // claims from the end of sum-check
  #[serde_as(as = "EvmCompatSerde")]
  eval_E: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_L_row: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_L_col: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_val_A: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_val_B: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_val_C: E::Scalar,

  // claimed evaluation of the witness polynomial W
  #[serde_as(as = "EvmCompatSerde")]
  eval_W: E::Scalar,

  // claimed evaluations for the row memory-check oracles
  #[serde_as(as = "EvmCompatSerde")]
  eval_t_plus_r_inv_row: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_row: E::Scalar, // address
  #[serde_as(as = "EvmCompatSerde")]
  eval_w_plus_r_inv_row: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_ts_row: E::Scalar,

  // claimed evaluations for the column memory-check oracles
  #[serde_as(as = "EvmCompatSerde")]
  eval_t_plus_r_inv_col: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_col: E::Scalar, // address
  #[serde_as(as = "EvmCompatSerde")]
  eval_w_plus_r_inv_col: E::Scalar,
  #[serde_as(as = "EvmCompatSerde")]
  eval_ts_col: E::Scalar,

  // a PCS evaluation argument
  eval_arg: EE::EvaluationArgument,
}
883
884impl<E: Engine, EE: EvaluationEngineTrait<E>> RelaxedR1CSSNARK<E, EE> {
885  /// Batched inner sum-check prover for 3 instances: memory, inner_batched, and witness.
886  fn prove_helper<T1, T2, T3>(
887    mem: &mut T1,
888    inner: &mut T2,
889    witness: &mut T3,
890    transcript: &mut E::TE,
891  ) -> Result<
892    (
893      SumcheckProof<E>,
894      Vec<E::Scalar>,
895      Vec<Vec<E::Scalar>>,
896      Vec<Vec<E::Scalar>>,
897      Vec<Vec<E::Scalar>>,
898    ),
899    NovaError,
900  >
901  where
902    T1: SumcheckEngine<E>,
903    T2: SumcheckEngine<E>,
904    T3: SumcheckEngine<E>,
905  {
906    // sanity checks
907    assert_eq!(mem.size(), inner.size());
908    assert_eq!(mem.size(), witness.size());
909    assert_eq!(mem.degree(), inner.degree());
910    assert_eq!(mem.degree(), witness.degree());
911
912    // these claims are already added to the transcript, so we do not need to add
913    let claims = mem
914      .initial_claims()
915      .into_iter()
916      .chain(inner.initial_claims())
917      .chain(witness.initial_claims())
918      .collect::<Vec<E::Scalar>>();
919
920    let s = transcript.squeeze(b"r")?;
921    let coeffs = powers::<E>(&s, claims.len());
922
923    // compute the joint claim
924    let claim = zip_with!((claims.iter(), coeffs.iter()), |c_1, c_2| *c_1 * c_2).sum();
925
926    let mut e = claim;
927    let mut r: Vec<E::Scalar> = Vec::new();
928    let mut cubic_polys: Vec<CompressedUniPoly<E::Scalar>> = Vec::new();
929    let num_rounds = mem.size().log_2();
930    for _ in 0..num_rounds {
931      let (evals_mem, (evals_inner, evals_witness)) = rayon::join(
932        || mem.evaluation_points(),
933        || rayon::join(|| inner.evaluation_points(), || witness.evaluation_points()),
934      );
935
936      let evals: Vec<Vec<E::Scalar>> = evals_mem
937        .into_iter()
938        .chain(evals_inner.into_iter())
939        .chain(evals_witness.into_iter())
940        .collect::<Vec<Vec<E::Scalar>>>();
941      assert_eq!(evals.len(), claims.len());
942
943      let evals_combined_0 = (0..evals.len()).map(|i| evals[i][0] * coeffs[i]).sum();
944      let evals_combined_bound_coeff = (0..evals.len()).map(|i| evals[i][1] * coeffs[i]).sum();
945      let evals_combined_inf = (0..evals.len()).map(|i| evals[i][2] * coeffs[i]).sum();
946
947      let evals = vec![
948        evals_combined_0,
949        e - evals_combined_0,
950        evals_combined_bound_coeff,
951        evals_combined_inf,
952      ];
953
954      let poly = UniPoly::from_evals_deg3(&evals);
955
956      // append the prover's message to the transcript
957      transcript.absorb(b"p", &poly);
958
959      // derive the verifier's challenge for the next round
960      let r_i = transcript.squeeze(b"c")?;
961      r.push(r_i);
962
963      let _ = rayon::join(
964        || mem.bound(&r_i),
965        || rayon::join(|| inner.bound(&r_i), || witness.bound(&r_i)),
966      );
967
968      e = poly.evaluate(&r_i);
969      cubic_polys.push(poly.compress());
970    }
971
972    let mem_claims = mem.final_claims();
973    let inner_claims = inner.final_claims();
974    let witness_claims = witness.final_claims();
975
976    Ok((
977      SumcheckProof::new(cubic_polys),
978      r,
979      mem_claims,
980      inner_claims,
981      witness_claims,
982    ))
983  }
984}
985
986impl<E: Engine, EE: EvaluationEngineTrait<E>> VerifierKey<E, EE> {
987  fn new(
988    num_cons: usize,
989    num_vars: usize,
990    S_comm: R1CSShapeSparkCommitment<E>,
991    vk_ee: EE::VerifierKey,
992  ) -> Self {
993    VerifierKey {
994      num_cons,
995      num_vars,
996      S_comm,
997      vk_ee,
998      digest: Default::default(),
999    }
1000  }
1001}
1002impl<E: Engine, EE: EvaluationEngineTrait<E>> DigestHelperTrait<E> for VerifierKey<E, EE> {
1003  /// Returns the digest of the verifier's key
1004  fn digest(&self) -> E::Scalar {
1005    self
1006      .digest
1007      .get_or_try_init(|| {
1008        let dc = DigestComputer::new(self);
1009        dc.digest()
1010      })
1011      .cloned()
1012      .expect("Failure to retrieve digest!")
1013  }
1014}
1015
impl<E: Engine, EE: EvaluationEngineTrait<E>> RelaxedR1CSSNARKTrait<E> for RelaxedR1CSSNARK<E, EE> {
  type ProverKey = ProverKey<E, EE>;
  type VerifierKey = VerifierKey<E, EE>;

  /// Returns the minimum commitment-key length this SNARK requires: enough
  /// generators to commit to the (sparse) entries of the A, B, and C matrices.
  fn ck_floor() -> Box<dyn for<'a> Fn(&'a R1CSShape<E>) -> usize> {
    Box::new(|shape: &R1CSShape<E>| -> usize {
      // the commitment key should be large enough to commit to the R1CS matrices
      shape.A.len() + shape.B.len() + shape.C.len()
    })
  }

  /// Produces the prover and verifier keys: pads the shape, builds the Spark
  /// representation of the R1CS matrices, and commits to it so the verifier
  /// only needs a short commitment (whose digest both parties bind into the
  /// transcript).
  fn setup(
    ck: &CommitmentKey<E>,
    S: &R1CSShape<E>,
  ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> {
    // check the provided commitment key meets minimal requirements
    if ck.length() < Self::ck_floor()(S) {
      return Err(NovaError::InvalidCommitmentKeyLength);
    }
    let (pk_ee, vk_ee) = EE::setup(ck)?;

    // pad the R1CS matrices
    let S = S.pad();

    // Spark representation: row/col/val vectors plus timestamp vectors for
    // memory checking, and commitments to all of them
    let S_repr = R1CSShapeSparkRepr::new(&S);
    let S_comm = S_repr.commit(ck);

    let vk = VerifierKey::new(S.num_cons, S.num_vars, S_comm.clone(), vk_ee);

    // the prover caches the verifier key's digest so both sides absorb the
    // same key material into the transcript
    let pk = ProverKey {
      pk_ee,
      S_repr,
      S_comm,
      vk_digest: vk.digest(),
    };

    Ok((pk, vk))
  }

  /// produces a succinct proof of satisfiability of a `RelaxedR1CS` instance
  fn prove(
    ck: &CommitmentKey<E>,
    pk: &Self::ProverKey,
    S: &R1CSShape<E>,
    U: &RelaxedR1CSInstance<E>,
    W: &RelaxedR1CSWitness<E>,
  ) -> Result<Self, NovaError> {
    // pad the R1CSShape
    let S = S.pad();
    // sanity check that R1CSShape has all required size characteristics
    assert!(S.is_regular_shape());

    let W = W.pad(&S); // pad the witness
    let mut transcript = E::TE::new(b"RelaxedR1CSSNARK");

    // append the verifier key (which includes commitment to R1CS matrices) and the RelaxedR1CSInstance to the transcript
    transcript.absorb(b"vk", &pk.vk_digest);
    transcript.absorb(b"U", U);

    // compute the full satisfying assignment by concatenating W.W, U.u, and U.X
    let z = [W.W.clone(), vec![U.u], U.X.clone()].concat();

    // compute Az, Bz, Cz
    let (Az, Bz, Cz) = S.multiply_vec(&z)?;

    // Shortened outer sum-check: run for log(num_cons) rounds instead of log(N).
    // Az, Bz, Cz, E are naturally size num_cons and zero-padded to N, so the
    // sum over {0,1}^log(N) collapses to {0,1}^log(num_cons).
    let num_rounds_outer = S.num_cons.log_2();
    let num_rounds_inner = pk.S_repr.N.log_2();
    let tau = (0..num_rounds_outer)
      .map(|_| transcript.squeeze(b"t"))
      .collect::<Result<Vec<_>, NovaError>>()?;

    // Step 1: Outer sum-check (standalone, degree-3) on size-m polynomials
    // Proves: 0 = Σ_{x∈{0,1}^log(m)} eq(τ,x) * (Az(x) * Bz(x) - (u·Cz(x) + E(x)))
    let uCz_E: Vec<E::Scalar> = Cz
      .iter()
      .zip(W.E.iter())
      .map(|(cz, e)| U.u * *cz + *e)
      .collect();
    let mut poly_Az = MultilinearPolynomial::new(Az);
    let mut poly_Bz = MultilinearPolynomial::new(Bz);
    let mut poly_uCz_E = MultilinearPolynomial::new(uCz_E);

    let (sc_outer, r_outer, claims_outer) = SumcheckProof::<E>::prove_cubic_with_three_inputs(
      &E::Scalar::ZERO,
      tau,
      &mut poly_Az,
      &mut poly_Bz,
      &mut poly_uCz_E,
      &mut transcript,
    )?;

    // Claims from the shortened outer sum-check (evaluations at the log(m)-length point)
    let eval_Az_at_r_outer = claims_outer[0];
    let eval_Bz_at_r_outer = claims_outer[1];
    // Cz was consumed neither by the sum-check nor committed, so evaluate it directly;
    // E's evaluation is then recovered from the third sum-check claim u·Cz + E
    let eval_Cz_at_r_outer = MultilinearPolynomial::evaluate_with(&Cz, &r_outer);
    let eval_E_at_r_outer = claims_outer[2] - U.u * eval_Cz_at_r_outer;

    // Absorb outer sum-check claims into transcript
    transcript.absorb(
      b"e",
      &[
        eval_Az_at_r_outer,
        eval_Bz_at_r_outer,
        eval_Cz_at_r_outer,
        eval_E_at_r_outer,
      ]
      .as_slice(),
    );

    // Squeeze random padding challenges and extend r_outer to length log(N).
    // For zero-padded polys P of size m within N: P(r_outer_full) = factor · P(r_outer)
    // where factor = Π(1 - r_pad_j) and r_outer_full = (r_pad, r_outer).
    // r_pad occupies the top (MSB) positions since padding variables are the high-order bits.
    let num_pad_rounds = num_rounds_inner
      .checked_sub(num_rounds_outer)
      .ok_or(NovaError::InvalidSumcheckProof)?;
    let r_pad = (0..num_pad_rounds)
      .map(|_| transcript.squeeze(b"p"))
      .collect::<Result<Vec<E::Scalar>, NovaError>>()?;
    let r_outer_full: Vec<E::Scalar> = r_pad.iter().chain(r_outer.iter()).cloned().collect();
    let factor: E::Scalar = r_pad
      .iter()
      .fold(E::Scalar::ONE, |acc, r| acc * (E::Scalar::ONE - r));

    // Pad E and W to size N for inner sum-check and PCS
    let E = padded::<E>(&W.E, pk.S_repr.N, &E::Scalar::ZERO);
    let W = padded::<E>(&W.W, pk.S_repr.N, &E::Scalar::ZERO);

    // -----------------------------------------------------------------------
    // Step 2: Prepare the batched inner batched sum-check (memory + inner_batched + witness)
    // -----------------------------------------------------------------------

    // Compute evaluation oracles at r_outer_full (the extended outer challenge)
    // L_row(i) = eq(r_outer_full, row(i)) for all i
    // L_col(i) = z(col(i)) for all i, where z is the full satisfying assignment
    let (mem_row, mem_col, L_row, L_col) = pk.S_repr.evaluation_oracles(&S, &r_outer_full, &z);
    let (comm_L_row, comm_L_col) = rayon::join(
      || E::CE::commit(ck, &L_row, &E::Scalar::ZERO),
      || E::CE::commit(ck, &L_col, &E::Scalar::ZERO),
    );

    // Absorb commitments to L_row and L_col
    transcript.absorb(b"e", &vec![comm_L_row, comm_L_col].as_slice());

    // Squeeze challenge for batching inner batched ABC claims
    let c = transcript.squeeze(b"c")?;

    // Squeeze challenges for memory sum-check
    let gamma = transcript.squeeze(b"g")?;
    let r = transcript.squeeze(b"r")?;

    // The two closures below are independent: only the memory closure touches
    // the transcript, so running them in parallel preserves transcript order.
    let (mut inner_batched_sc_inst, mem_res) = rayon::join(
      || {
        // Inner batched sum-check instance for:
        // (a) ABC claim: factor·(v_A + c·v_B + c²·v_C) = Σ L_row(y) * (val_A + c·val_B + c²·val_C)(y) * L_col(y)
        // (b) E claim: factor·eval_E = Σ eq(r_outer_full, y) * E(y)
        // The claims are scaled by factor because the inner sum-check uses r_outer_full
        // and eq(r_outer_full, j) = factor · eq(r_outer, j) for j < m.
        let val = zip_with!(
          par_iter,
          (pk.S_repr.val_A, pk.S_repr.val_B, pk.S_repr.val_C),
          |v_a, v_b, v_c| *v_a + c * *v_b + c * c * *v_c
        )
        .collect::<Vec<E::Scalar>>();

        InnerBatchedSumcheckInstance::new(
          factor * (eval_Az_at_r_outer + c * eval_Bz_at_r_outer + c * c * eval_Cz_at_r_outer),
          L_row.clone(),
          L_col.clone(),
          val,
          factor * eval_E_at_r_outer,
          r_outer_full.clone(), // eq challenges for factored eq (Gruen)
          E.clone(),
        )
      },
      || {
        // Memory sum-check instance to prove L_row and L_col are well-formed
        let (comm_mem_oracles, mem_oracles, mem_aux) =
          MemorySumcheckInstance::<E>::compute_oracles(
            ck,
            &r,
            &gamma,
            &mem_row,
            &pk.S_repr.row,
            &L_row,
            &pk.S_repr.ts_row,
            &mem_col,
            &pk.S_repr.col,
            &L_col,
            &pk.S_repr.ts_col,
          )?;
        // absorb the commitments
        transcript.absorb(b"l", &comm_mem_oracles.as_slice());

        let rho = (0..num_rounds_inner)
          .map(|_| transcript.squeeze(b"r"))
          .collect::<Result<Vec<_>, NovaError>>()?;

        Ok::<_, NovaError>((
          MemorySumcheckInstance::new(
            mem_oracles.clone(),
            mem_aux,
            rho,
            pk.S_repr.ts_row.clone(),
            pk.S_repr.ts_col.clone(),
          ),
          comm_mem_oracles,
          mem_oracles,
        ))
      },
    );

    let (mut mem_sc_inst, comm_mem_oracles, mem_oracles) = mem_res?;

    // Witness bound sum-check using r_outer_full as the random evaluation point
    let mut witness_sc_inst =
      WitnessBoundSumcheck::new(r_outer_full.clone(), W.clone(), S.num_vars);

    // -----------------------------------------------------------------------
    // Step 3: Run the batched inner batched sum-check (3 instances)
    // -----------------------------------------------------------------------
    let (sc_inner_batched, r_inner_batched, claims_mem, claims_inner_batched, claims_witness) =
      Self::prove_helper(
        &mut mem_sc_inst,
        &mut inner_batched_sc_inst,
        &mut witness_sc_inst,
        &mut transcript,
      )?;

    // Claims from the inner batched sum-check
    let eval_L_row = claims_inner_batched[0][0];
    let eval_L_col = claims_inner_batched[0][1];
    let eval_E = claims_inner_batched[1][0]; // E(r_inner_batched) — rerandomized to open at same point

    let eval_t_plus_r_inv_row = claims_mem[0][0];
    let eval_w_plus_r_inv_row = claims_mem[0][1];
    let eval_ts_row = claims_mem[0][2];

    let eval_t_plus_r_inv_col = claims_mem[1][0];
    let eval_w_plus_r_inv_col = claims_mem[1][1];
    let eval_ts_col = claims_mem[1][2];
    let eval_W = claims_witness[0][0];

    // Compute evaluations at r_inner_batched that did not come for free from the sum-check
    let (eval_val_A, eval_val_B, eval_val_C, eval_row, eval_col) = {
      let e = MultilinearPolynomial::multi_evaluate_with(
        &[
          &pk.S_repr.val_A,
          &pk.S_repr.val_B,
          &pk.S_repr.val_C,
          &pk.S_repr.row,
          &pk.S_repr.col,
        ],
        &r_inner_batched,
      );
      (e[0], e[1], e[2], e[3], e[4])
    };

    // All evaluations are at r_inner_batched — fold into one claim for batch PCS opening.
    // NOTE: eval_vec, comm_vec, and poly_vec must stay in the same order as the
    // verifier's eval_vec/comm_vec, since batching uses positional powers of c.
    let eval_vec = vec![
      eval_W,
      eval_E,
      eval_L_row,
      eval_L_col,
      eval_val_A,
      eval_val_B,
      eval_val_C,
      eval_t_plus_r_inv_row,
      eval_row,
      eval_w_plus_r_inv_row,
      eval_ts_row,
      eval_t_plus_r_inv_col,
      eval_col,
      eval_w_plus_r_inv_col,
      eval_ts_col,
    ];

    let comm_vec = [
      U.comm_W,
      U.comm_E,
      comm_L_row,
      comm_L_col,
      pk.S_comm.comm_val_A,
      pk.S_comm.comm_val_B,
      pk.S_comm.comm_val_C,
      comm_mem_oracles[0],
      pk.S_comm.comm_row,
      comm_mem_oracles[1],
      pk.S_comm.comm_ts_row,
      comm_mem_oracles[2],
      pk.S_comm.comm_col,
      comm_mem_oracles[3],
      pk.S_comm.comm_ts_col,
    ];
    let poly_vec = [
      &W,
      &E,
      &L_row,
      &L_col,
      &pk.S_repr.val_A,
      &pk.S_repr.val_B,
      &pk.S_repr.val_C,
      mem_oracles[0].as_ref(),
      &pk.S_repr.row,
      mem_oracles[1].as_ref(),
      &pk.S_repr.ts_row,
      mem_oracles[2].as_ref(),
      &pk.S_repr.col,
      mem_oracles[3].as_ref(),
      &pk.S_repr.ts_col,
    ];
    transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript
    let c = transcript.squeeze(b"c")?;
    let w: PolyEvalWitness<E> = PolyEvalWitness::batch(&poly_vec, &c);
    let u: PolyEvalInstance<E> =
      PolyEvalInstance::batch(&comm_vec, &r_inner_batched, &eval_vec, &c);

    // single PCS opening argument for the batched claim
    let eval_arg = EE::prove(
      ck,
      &pk.pk_ee,
      &mut transcript,
      &u.c,
      &w.p,
      &r_inner_batched,
      &u.e,
    )?;

    Ok(RelaxedR1CSSNARK {
      comm_L_row,
      comm_L_col,

      comm_t_plus_r_inv_row: comm_mem_oracles[0],
      comm_w_plus_r_inv_row: comm_mem_oracles[1],
      comm_t_plus_r_inv_col: comm_mem_oracles[2],
      comm_w_plus_r_inv_col: comm_mem_oracles[3],

      sc_outer,

      eval_Az_at_r_outer,
      eval_Bz_at_r_outer,
      eval_Cz_at_r_outer,
      eval_E_at_r_outer,

      sc_inner_batched,

      eval_E,
      eval_L_row,
      eval_L_col,
      eval_val_A,
      eval_val_B,
      eval_val_C,

      eval_W,

      eval_t_plus_r_inv_row,
      eval_row,
      eval_w_plus_r_inv_row,
      eval_ts_row,

      eval_col,
      eval_t_plus_r_inv_col,
      eval_w_plus_r_inv_col,
      eval_ts_col,

      eval_arg,
    })
  }

  /// verifies a proof of satisfiability of a `RelaxedR1CS` instance
  fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance<E>) -> Result<(), NovaError> {
    let mut transcript = E::TE::new(b"RelaxedR1CSSNARK");

    // append the verifier key (including commitment to R1CS matrices) and the RelaxedR1CSInstance to the transcript
    transcript.absorb(b"vk", &vk.digest());
    transcript.absorb(b"U", U);

    // Shortened outer sum-check runs for log(num_cons) rounds
    let num_rounds_outer = vk.num_cons.log_2();
    let num_rounds_inner = vk.S_comm.N.log_2();
    let tau = (0..num_rounds_outer)
      .map(|_| transcript.squeeze(b"t"))
      .collect::<Result<Vec<_>, NovaError>>()?;

    // -----------------------------------------------------------------------
    // Step 1: Verify the shortened outer sum-check
    // Claim: 0 = Σ_{x∈{0,1}^log(m)} eq(τ,x) * (Az(x) * Bz(x) - (u·Cz(x) + E(x)))
    // -----------------------------------------------------------------------
    let (claim_sc_outer_final, r_outer) =
      self
        .sc_outer
        .verify(E::Scalar::ZERO, num_rounds_outer, 3, &mut transcript)?;

    // Check outer sum-check final claim against the prover-supplied evaluations
    let eq_tau_at_r_outer = EqPolynomial::new(tau).evaluate(&r_outer);
    let claim_sc_outer_expected = eq_tau_at_r_outer
      * (self.eval_Az_at_r_outer * self.eval_Bz_at_r_outer
        - U.u * self.eval_Cz_at_r_outer
        - self.eval_E_at_r_outer);
    if claim_sc_outer_expected != claim_sc_outer_final {
      return Err(NovaError::InvalidSumcheckProof);
    }

    // Absorb outer sum-check claims (same order as the prover)
    transcript.absorb(
      b"e",
      &[
        self.eval_Az_at_r_outer,
        self.eval_Bz_at_r_outer,
        self.eval_Cz_at_r_outer,
        self.eval_E_at_r_outer,
      ]
      .as_slice(),
    );

    // Squeeze random padding challenges and extend r_outer to length log(N)
    // r_pad occupies the top (MSB) positions since padding variables are the high-order bits.
    let num_pad_rounds = num_rounds_inner
      .checked_sub(num_rounds_outer)
      .ok_or(NovaError::InvalidSumcheckProof)?;
    let r_pad = (0..num_pad_rounds)
      .map(|_| transcript.squeeze(b"p"))
      .collect::<Result<Vec<E::Scalar>, NovaError>>()?;
    let r_outer_full: Vec<E::Scalar> = r_pad.iter().chain(r_outer.iter()).cloned().collect();
    let factor: E::Scalar = r_pad
      .iter()
      .fold(E::Scalar::ONE, |acc, r| acc * (E::Scalar::ONE - r));

    // -----------------------------------------------------------------------
    // Step 2: Verify the batched inner batched sum-check (memory + inner_batched + witness)
    // -----------------------------------------------------------------------

    // Absorb commitments to L_row and L_col
    transcript.absorb(b"e", &vec![self.comm_L_row, self.comm_L_col].as_slice());

    // Squeeze challenge for batching inner batched ABC claims
    let c = transcript.squeeze(b"c")?;

    // Squeeze challenges for memory sum-check (fingerprinting: gamma, offset: r)
    let gamma = transcript.squeeze(b"g")?;
    let r = transcript.squeeze(b"r")?;

    transcript.absorb(
      b"l",
      &vec![
        self.comm_t_plus_r_inv_row,
        self.comm_w_plus_r_inv_row,
        self.comm_t_plus_r_inv_col,
        self.comm_w_plus_r_inv_col,
      ]
      .as_slice(),
    );

    let rho = (0..num_rounds_inner)
      .map(|_| transcript.squeeze(b"r"))
      .collect::<Result<Vec<_>, NovaError>>()?;

    // 9 claims: 6 memory + 2 inner (ABC + E) + 1 witness
    let num_claims = 9;
    let s = transcript.squeeze(b"r")?;
    let coeffs = powers::<E>(&s, num_claims);

    // Compute the combined initial claim
    // Claims 0-5: memory (all zero)
    // Claim 6: inner ABC = factor * (eval_Az + c * eval_Bz + c² * eval_Cz)
    // Claim 7: inner E = factor * eval_E
    // Claim 8: witness (zero)
    // The factor accounts for zero-padding: eval_P(r_outer_full) = factor * eval_P(r_outer)
    let claim_inner_batched_ABC = factor
      * (self.eval_Az_at_r_outer + c * self.eval_Bz_at_r_outer + c * c * self.eval_Cz_at_r_outer);
    let claim = coeffs[6] * claim_inner_batched_ABC + coeffs[7] * factor * self.eval_E_at_r_outer;

    // Verify inner batched sum-check
    let (claim_sc_inner_batched_final, r_inner_batched) =
      self
        .sc_inner_batched
        .verify(claim, num_rounds_inner, 3, &mut transcript)?;

    // Verify inner batched sum-check final claim
    let claim_sc_inner_batched_expected = {
      let rand_eq_bound_r_inner_batched = EqPolynomial::new(rho).evaluate(&r_inner_batched);

      // eq(r_outer_full, r_inner_batched) for the E claim and memory row address check
      let eq_r_outer = EqPolynomial::new(r_outer_full.clone());
      let eq_r_outer_at_r_inner_batched = eq_r_outer.evaluate(&r_inner_batched);

      // masked eq for witness bound check (using r_outer_full as random point)
      let taus_masked_bound_r_inner_batched =
        MaskedEqPolynomial::new(&eq_r_outer, vk.num_vars.log_2()).evaluate(&r_inner_batched);

      // row-side "read" fingerprint: t(addr, val) = addr + gamma·val, shifted by r
      let eval_t_plus_r_row = {
        let eval_addr_row = IdentityPolynomial::new(num_rounds_inner).evaluate(&r_inner_batched);
        let eval_val_row = eq_r_outer_at_r_inner_batched; // mem_row = eq(r_outer_full, ·)
        let eval_t = eval_addr_row + gamma * eval_val_row;
        eval_t + r
      };

      // row-side "write" fingerprint from the prover-supplied row/L_row evaluations
      let eval_w_plus_r_row = {
        let eval_addr_row = self.eval_row;
        let eval_val_row = self.eval_L_row;
        let eval_w = eval_addr_row + gamma * eval_val_row;
        eval_w + r
      };

      // col-side "read" fingerprint; the memory contents here is z itself
      let eval_t_plus_r_col = {
        let eval_addr_col = IdentityPolynomial::new(num_rounds_inner).evaluate(&r_inner_batched);

        // memory contents is z, so we compute eval_Z from eval_W and eval_X
        let eval_val_col = {
          // r_inner_batched was padded, so we now remove the padding
          let (factor, r_inner_batched_unpad) = {
            // number of padding variables between N and the 2·num_vars-sized z
            let l = vk.S_comm.N.log_2() - (2 * vk.num_vars).log_2();

            let mut factor = E::Scalar::ONE;
            for r_p in r_inner_batched.iter().take(l) {
              factor *= E::Scalar::ONE - r_p
            }

            let r_inner_batched_unpad = r_inner_batched[l..].to_vec();

            (factor, r_inner_batched_unpad)
          };

          let eval_X = {
            // public IO is (u, X)
            let X = vec![U.u]
              .into_iter()
              .chain(U.X.iter().cloned())
              .collect::<Vec<E::Scalar>>();

            // evaluate the sparse polynomial at r_inner_batched_unpad[1..]
            let poly_X = SparsePolynomial::new(r_inner_batched_unpad.len() - 1, X);
            poly_X.evaluate(&r_inner_batched_unpad[1..])
          };

          // z = (W, u, X): the top unpadded variable selects W vs the IO half
          self.eval_W + factor * r_inner_batched_unpad[0] * eval_X
        };
        let eval_t = eval_addr_col + gamma * eval_val_col;
        eval_t + r
      };

      // col-side "write" fingerprint from the prover-supplied col/L_col evaluations
      let eval_w_plus_r_col = {
        let eval_addr_col = self.eval_col;
        let eval_val_col = self.eval_L_col;
        let eval_w = eval_addr_col + gamma * eval_val_col;
        eval_w + r
      };

      // Memory claims (coeffs 0-5): grand-sum equality plus the four
      // inverse-well-formedness identities, each weighted by eq(rho, ·)
      let claim_mem_final_expected: E::Scalar = coeffs[0]
        * (self.eval_t_plus_r_inv_row - self.eval_w_plus_r_inv_row)
        + coeffs[1] * (self.eval_t_plus_r_inv_col - self.eval_w_plus_r_inv_col)
        + coeffs[2]
          * (rand_eq_bound_r_inner_batched
            * (self.eval_t_plus_r_inv_row * eval_t_plus_r_row - self.eval_ts_row))
        + coeffs[3]
          * (rand_eq_bound_r_inner_batched
            * (self.eval_w_plus_r_inv_row * eval_w_plus_r_row - E::Scalar::ONE))
        + coeffs[4]
          * (rand_eq_bound_r_inner_batched
            * (self.eval_t_plus_r_inv_col * eval_t_plus_r_col - self.eval_ts_col))
        + coeffs[5]
          * (rand_eq_bound_r_inner_batched
            * (self.eval_w_plus_r_inv_col * eval_w_plus_r_col - E::Scalar::ONE));

      // Inner batched ABC claim (coeff 6): L_row * L_col * (val_A + c·val_B + c²·val_C)
      let claim_inner_batched_ABC_final = coeffs[6]
        * self.eval_L_row
        * self.eval_L_col
        * (self.eval_val_A + c * self.eval_val_B + c * c * self.eval_val_C);

      // Inner batched E claim (coeff 7): eq(r_outer_full, r_inner_batched) * E(r_inner_batched)
      let claim_inner_batched_E_final = coeffs[7] * eq_r_outer_at_r_inner_batched * self.eval_E;

      // Witness claim (coeff 8)
      let claim_witness_final = coeffs[8] * taus_masked_bound_r_inner_batched * self.eval_W;

      claim_mem_final_expected
        + claim_inner_batched_ABC_final
        + claim_inner_batched_E_final
        + claim_witness_final
    };

    if claim_sc_inner_batched_expected != claim_sc_inner_batched_final {
      return Err(NovaError::InvalidSumcheckProof);
    }

    // Verify polynomial openings — all at r_inner_batched.
    // eval_vec/comm_vec order must match the prover's exactly (positional batching).
    let eval_vec = vec![
      self.eval_W,
      self.eval_E,
      self.eval_L_row,
      self.eval_L_col,
      self.eval_val_A,
      self.eval_val_B,
      self.eval_val_C,
      self.eval_t_plus_r_inv_row,
      self.eval_row,
      self.eval_w_plus_r_inv_row,
      self.eval_ts_row,
      self.eval_t_plus_r_inv_col,
      self.eval_col,
      self.eval_w_plus_r_inv_col,
      self.eval_ts_col,
    ];
    let comm_vec = [
      U.comm_W,
      U.comm_E,
      self.comm_L_row,
      self.comm_L_col,
      vk.S_comm.comm_val_A,
      vk.S_comm.comm_val_B,
      vk.S_comm.comm_val_C,
      self.comm_t_plus_r_inv_row,
      vk.S_comm.comm_row,
      self.comm_w_plus_r_inv_row,
      vk.S_comm.comm_ts_row,
      self.comm_t_plus_r_inv_col,
      vk.S_comm.comm_col,
      self.comm_w_plus_r_inv_col,
      vk.S_comm.comm_ts_col,
    ];
    transcript.absorb(b"e", &eval_vec.as_slice()); // comm_vec is already in the transcript
    let c = transcript.squeeze(b"c")?;
    let u: PolyEvalInstance<E> =
      PolyEvalInstance::batch(&comm_vec, &r_inner_batched, &eval_vec, &c);

    // verify
    EE::verify(
      &vk.vk_ee,
      &mut transcript,
      &u.c,
      &r_inner_batched,
      &u.e,
      &self.eval_arg,
    )?;

    Ok(())
  }
}