Skip to main content

nova_snark/provider/
hyperkzg.rs

//! This module implements Nova's evaluation engine using `HyperKZG`, a KZG-based polynomial commitment for multilinear polynomials.
//! HyperKZG is based on the transformation from univariate PCS to multilinear PCS in the Gemini paper (section 2.4.2 in <https://eprint.iacr.org/2022/420.pdf>).
//! However, there are some key differences:
//! (1) HyperKZG works with multilinear polynomials represented in evaluation form (rather than in coefficient form as in Gemini's transformation).
//! This means that Spartan's polynomial IOP can commit to its polynomials as-is without incurring any interpolations or FFTs.
//! (2) HyperKZG is specialized to use KZG as the univariate commitment scheme, so it includes several optimizations (both during the transformation of multilinear-to-univariate claims
//! and within the KZG commitment scheme implementation itself).
8#![allow(non_snake_case)]
9#[cfg(feature = "io")]
10use crate::provider::{ptau::PtauFileError, read_ptau, write_ptau};
11use crate::{
12  errors::NovaError,
13  gadgets::utils::to_bignat_repr,
14  provider::{
15    msm::batch_add,
16    traits::{DlogGroup, DlogGroupExt, PairingGroup},
17  },
18  traits::{
19    commitment::{CommitmentEngineTrait, CommitmentTrait, Len},
20    evaluation::EvaluationEngineTrait,
21    evm_serde::EvmCompatSerde,
22    AbsorbInRO2Trait, AbsorbInROTrait, Engine, Group, ROTrait, TranscriptEngineTrait,
23    TranscriptReprTrait,
24  },
25};
26use core::{
27  iter,
28  marker::PhantomData,
29  ops::Range,
30  ops::{Add, Mul, MulAssign},
31  slice,
32};
33use ff::Field;
34#[cfg(any(test, feature = "test-utils"))]
35use ff::PrimeFieldBits;
36use num_integer::Integer;
37use num_traits::ToPrimitive;
38#[cfg(any(test, feature = "test-utils"))]
39use rand_core::OsRng;
40use rayon::prelude::*;
41use serde::{Deserialize, Serialize};
42use serde_with::serde_as;
43
/// Alias to points on G1 that are in preprocessed form
type G1Affine<E> = <<E as Engine>::GE as DlogGroup>::AffineGroupElement;

/// Alias to points on G2 that are in preprocessed form
type G2Affine<E> = <<<E as Engine>::GE as PairingGroup>::G2 as DlogGroup>::AffineGroupElement;

/// Default number of target chunks used in splitting up polynomial division in the kzg_open closure
const DEFAULT_TARGET_CHUNKS: usize = 1 << 10;
52
/// KZG commitment key
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CommitmentKey<E: Engine>
where
  E::GE: PairingGroup,
{
  // G1 generators: the generator multiplied by successive powers of tau
  // (see `setup_from_tau_direct`); used as MSM bases when committing.
  ck: Vec<<E::GE as DlogGroup>::AffineGroupElement>,
  // Blinding generator, derived from a label (not from tau); used for hiding.
  h: <E::GE as DlogGroup>::AffineGroupElement,
  tau_H: <<E::GE as PairingGroup>::G2 as DlogGroup>::AffineGroupElement, // needed only for the verifier key
}
63
64impl<E: Engine> CommitmentKey<E>
65where
66  E::GE: PairingGroup,
67{
68  /// Create a new commitment key
69  pub fn new(
70    ck: Vec<<E::GE as DlogGroup>::AffineGroupElement>,
71    h: <E::GE as DlogGroup>::AffineGroupElement,
72    tau_H: <<E::GE as PairingGroup>::G2 as DlogGroup>::AffineGroupElement,
73  ) -> Self {
74    Self { ck, h, tau_H }
75  }
76
77  /// Returns a reference to the ck field
78  pub fn ck(&self) -> &[<E::GE as DlogGroup>::AffineGroupElement] {
79    &self.ck
80  }
81
82  /// Returns a reference to the h field
83  pub fn h(&self) -> &<E::GE as DlogGroup>::AffineGroupElement {
84    &self.h
85  }
86
87  /// Returns a reference to the tau_H field
88  pub fn tau_H(&self) -> &<<E::GE as PairingGroup>::G2 as DlogGroup>::AffineGroupElement {
89    &self.tau_H
90  }
91
92  /// Returns the coordinates of the generator points.
93  ///
94  /// # Panics
95  ///
96  /// Panics if any generator point is the point at infinity.
97  pub fn to_coordinates(&self) -> Vec<(E::Base, E::Base)> {
98    self
99      .ck
100      .par_iter()
101      .map(|c| {
102        let (x, y, is_infinity) = <E::GE as DlogGroup>::group(c).to_coordinates();
103        assert!(!is_infinity);
104        (x, y)
105      })
106      .collect()
107  }
108}
109
110impl<E: Engine> Len for CommitmentKey<E>
111where
112  E::GE: PairingGroup,
113{
114  fn length(&self) -> usize {
115    self.ck.len()
116  }
117}
118
/// A type that holds blinding generator
#[serde_as]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct DerandKey<E: Engine>
where
  E::GE: DlogGroup,
{
  // Blinding generator h; `derandomize` subtracts h * r from a commitment
  // to strip its blinding term.
  #[serde_as(as = "EvmCompatSerde")]
  h: <E::GE as DlogGroup>::AffineGroupElement,
}
130
/// A KZG commitment
#[serde_as]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct Commitment<E: Engine>
where
  E::GE: PairingGroup,
{
  // A commitment is a single group element in G1.
  #[serde_as(as = "EvmCompatSerde")]
  comm: E::GE,
}
142
143impl<E: Engine> Commitment<E>
144where
145  E::GE: PairingGroup,
146{
147  /// Creates a new commitment from the underlying group element
148  pub fn new(comm: E::GE) -> Self {
149    Commitment { comm }
150  }
151  /// Returns the commitment as a group element
152  pub fn into_inner(self) -> E::GE {
153    self.comm
154  }
155}
156
157impl<E: Engine> CommitmentTrait<E> for Commitment<E>
158where
159  E::GE: PairingGroup,
160{
161  fn to_coordinates(&self) -> (E::Base, E::Base, bool) {
162    self.comm.to_coordinates()
163  }
164}
165
166impl<E: Engine> Default for Commitment<E>
167where
168  E::GE: PairingGroup,
169{
170  fn default() -> Self {
171    Commitment {
172      comm: E::GE::zero(),
173    }
174  }
175}
176
177impl<E: Engine> TranscriptReprTrait<E::GE> for Commitment<E>
178where
179  E::GE: PairingGroup,
180{
181  fn to_transcript_bytes(&self) -> Vec<u8> {
182    use crate::traits::Group;
183    let (x, y, is_infinity) = self.comm.to_coordinates();
184    // Get curve parameter B to determine encoding strategy
185    let (_, b, _, _) = E::GE::group_params();
186
187    if b != E::Base::ZERO {
188      // For curves with B!=0 (like BN254 with B=3, Grumpkin with B=-5),
189      // (0, 0) doesn't lie on the curve (since 0 != 0 + 0 + B),
190      // so point at infinity can be safely encoded as (0, 0).
191      let (x, y) = if is_infinity {
192        (E::Base::ZERO, E::Base::ZERO)
193      } else {
194        (x, y)
195      };
196      [x.to_transcript_bytes(), y.to_transcript_bytes()].concat()
197    } else {
198      // For curves with B=0, (0, 0) lies on the curve, so we need the is_infinity flag
199      let is_infinity_byte = (!is_infinity).into();
200      [
201        x.to_transcript_bytes(),
202        y.to_transcript_bytes(),
203        [is_infinity_byte].to_vec(),
204      ]
205      .concat()
206    }
207  }
208}
209
210impl<E: Engine> AbsorbInROTrait<E> for Commitment<E>
211where
212  E::GE: PairingGroup,
213{
214  fn absorb_in_ro(&self, ro: &mut E::RO) {
215    let (x, y, is_infinity) = self.comm.to_coordinates();
216    // When B != 0 (true for BN254, Grumpkin, etc.), (0,0) is not on the curve
217    // so we can use it as a canonical representation for infinity.
218    let (_, b, _, _) = E::GE::group_params();
219    if b != E::Base::ZERO {
220      let (x, y) = if is_infinity {
221        (E::Base::ZERO, E::Base::ZERO)
222      } else {
223        (x, y)
224      };
225      ro.absorb(x);
226      ro.absorb(y);
227    } else {
228      ro.absorb(x);
229      ro.absorb(y);
230      ro.absorb(if is_infinity {
231        E::Base::ONE
232      } else {
233        E::Base::ZERO
234      });
235    }
236  }
237}
238
239impl<E: Engine> AbsorbInRO2Trait<E> for Commitment<E>
240where
241  E::GE: PairingGroup,
242{
243  fn absorb_in_ro2(&self, ro: &mut E::RO2) {
244    let (x, y, is_infinity) = self.comm.to_coordinates();
245    // When B != 0, use (0,0) for infinity
246    let (_, b, _, _) = E::GE::group_params();
247    let (x, y) = if b != E::Base::ZERO && is_infinity {
248      (E::Base::ZERO, E::Base::ZERO)
249    } else {
250      (x, y)
251    };
252
253    // we have to absorb x and y in big num format
254    let limbs_x = to_bignat_repr(&x);
255    let limbs_y = to_bignat_repr(&y);
256
257    for limb in limbs_x.iter().chain(limbs_y.iter()) {
258      ro.absorb(*limb);
259    }
260    // Only absorb is_infinity when B == 0
261    if b == E::Base::ZERO {
262      ro.absorb(if is_infinity {
263        E::Scalar::ONE
264      } else {
265        E::Scalar::ZERO
266      });
267    }
268  }
269}
270
271impl<E: Engine> MulAssign<E::Scalar> for Commitment<E>
272where
273  E::GE: PairingGroup,
274{
275  fn mul_assign(&mut self, scalar: E::Scalar) {
276    let result = self.comm * scalar;
277    *self = Commitment { comm: result };
278  }
279}
280
281impl<'b, E: Engine> Mul<&'b E::Scalar> for &'_ Commitment<E>
282where
283  E::GE: PairingGroup,
284{
285  type Output = Commitment<E>;
286
287  fn mul(self, scalar: &'b E::Scalar) -> Commitment<E> {
288    Commitment {
289      comm: self.comm * scalar,
290    }
291  }
292}
293
294impl<E: Engine> Mul<E::Scalar> for Commitment<E>
295where
296  E::GE: PairingGroup,
297{
298  type Output = Commitment<E>;
299
300  fn mul(self, scalar: E::Scalar) -> Commitment<E> {
301    Commitment {
302      comm: self.comm * scalar,
303    }
304  }
305}
306
307impl<E: Engine> Add for Commitment<E>
308where
309  E::GE: PairingGroup,
310{
311  type Output = Commitment<E>;
312
313  fn add(self, other: Commitment<E>) -> Commitment<E> {
314    Commitment {
315      comm: self.comm + other.comm,
316    }
317  }
318}
319
/// Provides a commitment engine
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct CommitmentEngine<E: Engine> {
  // Zero-sized marker binding this engine to a concrete `Engine` type.
  _p: PhantomData<E>,
}
325
/// Test-only methods for generating commitment keys with known tau.
/// These methods are insecure for production use - use `load_setup` with ptau files instead.
#[cfg(any(test, feature = "test-utils"))]
impl<E: Engine> CommitmentKey<E>
where
  E::GE: PairingGroup,
{
  /// NOTE: this is for testing purposes and should not be used in production.
  /// In production, use [`PublicParams::setup_with_ptau_dir`] or
  /// [`R1CSShape::commitment_key_from_ptau_dir`] with ptau files from a trusted setup ceremony.
  ///
  /// This generates a commitment key with a random tau value. Since the caller
  /// (or anyone with access to the RNG state) knows tau, this is insecure.
  pub fn setup_from_rng(label: &'static [u8], n: usize, rng: impl rand_core::RngCore) -> Self {
    // Thresholds above which the parallel power computation (T1) and the
    // fixed-base comb exponentiation (T2) become worthwhile.
    const T1: usize = 1 << 16;
    const T2: usize = 100_000;

    // The key always holds a power-of-two number of generators.
    let num_gens = n.next_power_of_two();

    let tau = E::Scalar::random(rng);

    // powers_of_tau = [tau^0, tau^1, ..., tau^(num_gens-1)]
    let powers_of_tau = if num_gens < T1 {
      Self::compute_powers_serial(tau, num_gens)
    } else {
      Self::compute_powers_par(tau, num_gens)
    };

    if num_gens < T2 {
      Self::setup_from_tau_direct(label, &powers_of_tau, tau)
    } else {
      Self::setup_from_tau_fixed_base_exp(label, &powers_of_tau)
    }
  }

  // Builds the key via batched fixed-base comb exponentiation
  // (`fixed_base_exp_comb_batch`), used for large keys.
  fn setup_from_tau_fixed_base_exp(label: &'static [u8], powers_of_tau: &[E::Scalar]) -> Self {
    let tau = powers_of_tau[1];

    let gen = <E::GE as DlogGroup>::gen();

    let ck = fixed_base_exp_comb_batch::<4, 16, 64, 2, 32, _>(gen, powers_of_tau);
    let ck = ck.par_iter().map(|p| p.affine()).collect();

    // Blinding generator is derived from the label, independently of tau.
    let h = *E::GE::from_label(label, 1).first().unwrap();

    // tau in G2, needed by the verifier key.
    let tau_H = (<<E::GE as PairingGroup>::G2 as DlogGroup>::gen() * tau).affine();

    Self { ck, h, tau_H }
  }

  // Builds the key by directly multiplying the G1 generator by each power of tau.
  fn setup_from_tau_direct(
    label: &'static [u8],
    powers_of_tau: &[E::Scalar],
    tau: E::Scalar,
  ) -> Self {
    let ck: Vec<G1Affine<E>> = powers_of_tau
      .par_iter()
      .map(|power| (<E::GE as DlogGroup>::gen() * *power).affine())
      .collect();

    let h = *E::GE::from_label(label, 1).first().unwrap();

    let tau_H = (<<E::GE as PairingGroup>::G2 as DlogGroup>::gen() * tau).affine();

    Self { ck, h, tau_H }
  }

  // Sequentially computes [tau^0, ..., tau^(n-1)].
  // Fixed: uses `push` instead of repeated `Vec::insert`, and returns an empty
  // vector for n == 0 (the previous version unconditionally emitted tau^0).
  fn compute_powers_serial(tau: E::Scalar, n: usize) -> Vec<E::Scalar> {
    let mut powers = Vec::with_capacity(n);
    let mut cur = E::Scalar::ONE;
    for _ in 0..n {
      powers.push(cur);
      cur *= tau;
    }
    powers
  }

  // Computes [tau^0, ..., tau^(n-1)] in parallel. Each chunk seeds itself with
  // tau^start (a single `pow`) and then extends by repeated multiplication.
  // Fixed: only chunk start indices are materialized (instead of all n
  // indices), and the redundant `.into_par_iter()` after `par_chunks` is gone.
  fn compute_powers_par(tau: E::Scalar, n: usize) -> Vec<E::Scalar> {
    let num_threads = rayon::current_num_threads();
    let chunk_size = std::cmp::max(n / num_threads, 1);
    (0..n)
      .step_by(chunk_size)
      .collect::<Vec<_>>()
      .into_par_iter()
      .flat_map(|start| {
        let len = chunk_size.min(n - start);
        let mut chunk = Vec::with_capacity(len);
        chunk.push(tau.pow([start as u64]));
        for i in 1..len {
          chunk.push(chunk[i - 1] * tau);
        }
        chunk
      })
      .collect()
  }
}
421
// * Implementation of https://www.weimerskirch.org/files/Weimerskirch_FixedBase.pdf
// Only used by test-only setup code
//
// Computes `gen * s` for every scalar `s` in `scalars`, sharing a single
// precomputed comb table across all of them. Const-generic configuration:
//   POW_2_H = 2^H, A = V * B, and scalars are scanned in A-sized bit windows.
#[cfg(any(test, feature = "test-utils"))]
fn fixed_base_exp_comb_batch<
  const H: usize,
  const POW_2_H: usize,
  const A: usize,
  const B: usize,
  const V: usize,
  G: DlogGroup,
>(
  gen: G,
  scalars: &[G::Scalar],
) -> Vec<G> {
  // Sanity-check the const-generic configuration.
  assert_eq!(1 << H, POW_2_H);
  assert_eq!(A, V * B);
  assert!(A <= 64);

  let zero = G::zero();
  let one = gen;

  // gi[i] = gen doubled i*A times, i.e. gen * 2^(i*A), for i in 0..H.
  let gi = {
    let mut res = [one; H];
    for i in 1..H {
      let prod = (0..A).fold(res[i - 1], |acc, _| acc + acc);
      res[i] = prod;
    }
    res
  };

  // Precompute the comb table in parallel over table indices 1..2^H.
  let mut precompute_res = (1..POW_2_H)
    .into_par_iter()
    .map(|i| {
      let mut res = [zero; V];

      // * G[0][i]
      // Sum of gi[j] over the bits j set in the index i.
      let mut g_0_i = zero;
      for (j, item) in gi.iter().enumerate().take(H) {
        if (1 << j) & i > 0 {
          g_0_i += item;
        }
      }

      res[0] = g_0_i;

      // * G[j][i]
      // Each subsequent row is the previous one doubled B times.
      for j in 1..V {
        res[j] = (0..B).fold(res[j - 1], |acc, _| acc + acc);
      }

      res
    })
    .collect::<Vec<_>>();

  // Table index 0 corresponds to an all-zero window: the identity row.
  precompute_res.insert(0, [zero; V]);

  let precomputed_g: [_; POW_2_H] = std::array::from_fn(|j| precompute_res[j]);

  let zero = G::zero();

  // For each scalar, scan its bits in the comb pattern and accumulate
  // table entries; scalars are processed in parallel.
  scalars
    .par_iter()
    .map(|e| {
      let mut a = zero;
      let mut bits = e.to_le_bits().into_iter().collect::<Vec<_>>();

      // Pad the bit vector to a multiple of A so every window is full.
      while bits.len() % A != 0 {
        bits.push(false);
      }

      for k in (0..B).rev() {
        a += a;
        for j in (0..V).rev() {
          // Assemble the H-bit table index for window (j, k).
          let i_j_k = (0..H)
            .map(|h| {
              let b = bits[h * A + j * B + k];
              (1 << h) * b as usize
            })
            .sum::<usize>();

          if i_j_k > 0 {
            a += precomputed_g[i_j_k][j];
          }
        }
      }

      a
    })
    .collect::<Vec<_>>()
}
512
impl<E: Engine> CommitmentEngineTrait<E> for CommitmentEngine<E>
where
  E::GE: PairingGroup,
{
  type Commitment = Commitment<E>;
  type CommitmentKey = CommitmentKey<E>;
  type DerandKey = DerandKey<E>;

  /// Generate a commitment key with a random tau value.
  ///
  /// # Availability
  ///
  /// This method is only available in test builds or when the `test-utils` feature is enabled.
  /// In production builds, this will return an error.
  ///
  /// # Security Warning
  ///
  /// This method generates an **insecure** commitment key using a random tau.
  /// The security of HyperKZG relies on no one knowing the secret tau value.
  ///
  /// **For production use**, call [`PublicParams::setup_with_ptau_dir`] or
  /// [`R1CSShape::commitment_key_from_ptau_dir`] to load commitment keys from
  /// Powers of Tau ceremony files (e.g., from the Ethereum PPOT ceremony).
  ///
  /// # For downstream crates
  ///
  /// If you need access to this method in your tests, add `nova-snark` with the
  /// `test-utils` feature to your `dev-dependencies`:
  ///
  /// ```toml
  /// [dev-dependencies]
  /// nova-snark = { version = "...", features = ["test-utils"] }
  /// ```
  #[cfg(any(test, feature = "test-utils"))]
  fn setup(label: &'static [u8], n: usize) -> Result<Self::CommitmentKey, NovaError> {
    Ok(Self::CommitmentKey::setup_from_rng(label, n, OsRng))
  }

  // Production builds refuse to generate a key locally; keys must come from a
  // trusted-setup ptau file instead.
  #[cfg(not(any(test, feature = "test-utils")))]
  fn setup(_label: &'static [u8], _n: usize) -> Result<Self::CommitmentKey, NovaError> {
    Err(NovaError::SetupError(
      "HyperKZG::setup is disabled in production builds. \
       Use PublicParams::setup_with_ptau_dir or R1CSShape::commitment_key_from_ptau_dir \
       with ptau files from a trusted setup ceremony. \
       For tests, enable the 'test-utils' feature."
        .to_string(),
    ))
  }

  // The derandomization key is just the blinding generator h from the key.
  fn derand_key(ck: &Self::CommitmentKey) -> Self::DerandKey {
    Self::DerandKey { h: ck.h }
  }

  // Commits to `v` as an MSM over the first v.len() generators, plus h * r
  // as the blinding term.
  fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar], r: &E::Scalar) -> Self::Commitment {
    assert!(ck.ck.len() >= v.len());

    Commitment {
      comm: E::GE::vartime_multiscalar_mul(v, &ck.ck[..v.len()])
        + <E::GE as DlogGroup>::group(&ck.h) * r,
    }
  }

  // Batched version of `commit`: one scalar vector and one blinding scalar
  // per output commitment; MSMs are batched, blinding is applied in parallel.
  fn batch_commit(
    ck: &Self::CommitmentKey,
    v: &[Vec<<E as Engine>::Scalar>],
    r: &[<E as Engine>::Scalar],
  ) -> Vec<Self::Commitment> {
    assert!(v.len() == r.len());

    let max = v.iter().map(|v| v.len()).max().unwrap_or(0);
    assert!(ck.ck.len() >= max);

    let h = <E::GE as DlogGroup>::group(&ck.h);

    E::GE::batch_vartime_multiscalar_mul(v, &ck.ck[..max])
      .par_iter()
      .zip(r.par_iter())
      .map(|(commit, r_i)| Commitment {
        comm: *commit + (h * r_i),
      })
      .collect()
  }

  // Like `commit`, but for small (word-sized) scalars, using the specialized
  // small-scalar MSM.
  fn commit_small<T: Integer + Into<u64> + Copy + Sync + ToPrimitive>(
    ck: &Self::CommitmentKey,
    v: &[T],
    r: &E::Scalar,
  ) -> Self::Commitment {
    assert!(ck.ck.len() >= v.len());
    Commitment {
      comm: E::GE::vartime_multiscalar_mul_small(v, &ck.ck[..v.len()])
        + <E::GE as DlogGroup>::group(&ck.h) * r,
    }
  }

  // Batched version of `commit_small`.
  // NOTE(review): this uses a serial `iter()` for the blinding step where
  // `batch_commit` uses `par_iter()` — presumably intentional for small
  // inputs, but worth confirming.
  fn batch_commit_small<T: Integer + Into<u64> + Copy + Sync + ToPrimitive>(
    ck: &Self::CommitmentKey,
    v: &[Vec<T>],
    r: &[E::Scalar],
  ) -> Vec<Self::Commitment> {
    assert!(v.len() == r.len());

    let max = v.iter().map(|v| v.len()).max().unwrap_or(0);
    assert!(ck.ck.len() >= max);

    let h = <E::GE as DlogGroup>::group(&ck.h);

    E::GE::batch_vartime_multiscalar_mul_small(v, &ck.ck[..max])
      .iter()
      .zip(r.iter())
      .map(|(commit, r_i)| Commitment {
        comm: *commit + (h * r_i),
      })
      .collect()
  }

  // Removes the blinding term h * r from a commitment.
  fn derandomize(
    dk: &Self::DerandKey,
    commit: &Self::Commitment,
    r: &E::Scalar,
  ) -> Self::Commitment {
    Commitment {
      comm: commit.comm - <E::GE as DlogGroup>::group(&dk.h) * r,
    }
  }

  // Loads a commitment key from a ptau file: G1 points become `ck`, the last
  // of the two G2 points becomes `tau_H`, and h is re-derived from the label.
  #[cfg(feature = "io")]
  fn load_setup(
    reader: &mut (impl std::io::Read + std::io::Seek),
    label: &'static [u8],
    n: usize,
  ) -> Result<Self::CommitmentKey, PtauFileError> {
    let num = n.next_power_of_two();

    // read points as well as check sanity of ptau file
    let (g1_points, g2_points) = read_ptau(reader, num, 2)?;

    let ck = g1_points.to_vec();

    let tau_H = *g2_points.last().unwrap();

    let h = *E::GE::from_label(label, 1).first().unwrap();

    Ok(CommitmentKey { ck, h, tau_H })
  }

  /// Save keys
  #[cfg(feature = "io")]
  fn save_setup(
    ck: &Self::CommitmentKey,
    mut writer: &mut (impl std::io::Write + std::io::Seek),
  ) -> Result<(), PtauFileError> {
    let g1_points = ck.ck.clone();

    // NOTE(review): tau_H is written into both G2 slots; `load_setup` reads
    // only the last one, but a ceremony ptau file would carry [H, tau*H] —
    // confirm the first slot's value is never consumed elsewhere.
    let g2_points = vec![ck.tau_H, ck.tau_H];
    // NOTE(review): `power` is log2(next_power_of_two(len)) + 1 — presumably
    // what `write_ptau` expects for the header; verify against its format.
    let power = g1_points.len().next_power_of_two().trailing_zeros() + 1;

    write_ptau(&mut writer, g1_points, g2_points, power)
  }

  // Commits to a sub-range of `v` against the matching sub-range of
  // generators; `max_num_bits` bounds the scalar bit-width for the MSM.
  fn commit_small_range<T: Integer + Into<u64> + Copy + Sync + ToPrimitive>(
    ck: &Self::CommitmentKey,
    v: &[T],
    r: &<E as Engine>::Scalar,
    range: Range<usize>,
    max_num_bits: usize,
  ) -> Self::Commitment {
    let bases = &ck.ck[range.clone()];
    let scalars = &v[range];

    assert!(bases.len() == scalars.len());

    let mut res =
      E::GE::vartime_multiscalar_mul_small_with_max_num_bits(scalars, bases, max_num_bits);

    // Skip the blinding MSM term entirely when r is zero.
    if r != &E::Scalar::ZERO {
      res += <E::GE as DlogGroup>::group(&ck.h) * r;
    }

    Commitment { comm: res }
  }

  // Affine coordinates of all generators; panics on a generator at infinity
  // (see `CommitmentKey::to_coordinates`).
  fn ck_to_coordinates(ck: &Self::CommitmentKey) -> Vec<(E::Base, E::Base)> {
    ck.to_coordinates()
  }

  // Converts the affine generators back to group elements, asserting that
  // none of them is the identity.
  fn ck_to_group_elements(ck: &Self::CommitmentKey) -> Vec<E::GE> {
    ck.ck()
      .par_iter()
      .map(|g| {
        let ge = E::GE::group(g);
        assert!(
          ge != E::GE::zero(),
          "CommitmentKey contains a generator at infinity"
        );
        ge
      })
      .collect()
  }

  // Derives a new key of `table_size` generators by accumulating the existing
  // generators into slots selected by `addresses`: acc[addresses[i]] += ck[i].
  fn ck_derive_by_address(
    ck: &Self::CommitmentKey,
    addresses: &[usize],
    table_size: usize,
  ) -> Result<Self::CommitmentKey, NovaError> {
    let bases = Self::ck_to_group_elements(ck);
    if addresses.len() > bases.len() {
      return Err(NovaError::InvalidCommitmentKeyLength);
    }
    if addresses.iter().any(|&j| j >= table_size) {
      return Err(NovaError::InvalidIndex);
    }
    let mut acc = vec![E::GE::zero(); table_size];
    for (i, &j) in addresses.iter().enumerate() {
      acc[j] += bases[i];
    }
    let ck_affine = acc.par_iter().map(|g| g.affine()).collect();
    Ok(CommitmentKey::new(ck_affine, *ck.h(), *ck.tau_H()))
  }

  // Commits to a 0/1 vector given only the indices of its 1 entries: the
  // commitment is the sum of the selected generators (plus optional blinding).
  fn commit_sparse_binary(
    ck: &Self::CommitmentKey,
    non_zero_indices: &[usize],
    r: &<E as Engine>::Scalar,
  ) -> Self::Commitment {
    let comm = batch_add(&ck.ck, non_zero_indices);
    let mut comm = <E::GE as DlogGroup>::group(&comm.into());

    if r != &E::Scalar::ZERO {
      comm += <E::GE as DlogGroup>::group(&ck.h) * r;
    }

    Commitment { comm }
  }
}
748
/// Provides an implementation of generators for proving evaluations
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct ProverKey<E: Engine> {
  // HyperKZG's prover needs no key material beyond the commitment key,
  // so this is a zero-sized marker.
  _p: PhantomData<E>,
}
755
/// A verifier key
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct VerifierKey<E: Engine>
where
  E::GE: PairingGroup,
{
  // Generator of G1 (set to `E::GE::gen()` in `EvaluationEngine::setup`).
  #[serde_as(as = "EvmCompatSerde")]
  pub(crate) G: G1Affine<E>,
  // Generator of G2.
  #[serde_as(as = "EvmCompatSerde")]
  pub(crate) H: G2Affine<E>,
  // tau in G2, copied from the commitment key during `setup`.
  #[serde_as(as = "EvmCompatSerde")]
  pub(crate) tau_H: G2Affine<E>,
}
771
/// Provides an implementation of a polynomial evaluation argument
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct EvaluationArgument<E: Engine>
where
  E::GE: PairingGroup,
{
  // KZG commitments to the intermediate folded polynomials (the first
  // polynomial is already committed by the caller and is omitted).
  #[serde_as(as = "Vec<EvmCompatSerde>")]
  com: Vec<G1Affine<E>>,
  // KZG opening witnesses, one per evaluation point (r, -r, r^2).
  #[serde_as(as = "[EvmCompatSerde; 3]")]
  w: [G1Affine<E>; 3],
  // Per-polynomial evaluations f_i(u_j) at the three challenge points.
  #[serde_as(as = "Vec<[EvmCompatSerde; 3]>")]
  v: Vec<[E::Scalar; 3]>,
}
787
788impl<E: Engine> EvaluationArgument<E>
789where
790  E::GE: PairingGroup,
791{
792  /// Create a new evaluation argument
793  pub fn new(com: Vec<G1Affine<E>>, w: [G1Affine<E>; 3], v: Vec<[E::Scalar; 3]>) -> Self {
794    Self { com, w, v }
795  }
796  /// The KZG commitments to intermediate polynomials
797  pub fn com(&self) -> &[G1Affine<E>] {
798    &self.com
799  }
800  /// The KZG witnesses for batch openings
801  pub fn w(&self) -> &[G1Affine<E>] {
802    &self.w
803  }
804  /// The evaluations of the polynomials at challenge points
805  pub fn v(&self) -> &[[E::Scalar; 3]] {
806    &self.v
807  }
808}
809
/// Provides an implementation of a polynomial evaluation engine using KZG
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct EvaluationEngine<E: Engine> {
  // Zero-sized marker binding this engine to a concrete `Engine` type.
  _p: PhantomData<E>,
}
815
816impl<E: Engine> EvaluationEngine<E>
817where
818  E::GE: PairingGroup,
819{
820  // This impl block defines helper functions that are not a part of
821  // EvaluationEngineTrait, but that we will use to implement the trait methods.
822  fn compute_challenge(com: &[G1Affine<E>], transcript: &mut <E as Engine>::TE) -> E::Scalar {
823    transcript.absorb(b"c", &com.to_vec().as_slice());
824
825    transcript.squeeze(b"c").unwrap()
826  }
827
828  // Compute challenge q = Hash(vk, C0, ..., C_{k-1}, u0, ...., u_{t-1},
829  // (f_i(u_j))_{i=0..k-1,j=0..t-1})
830  fn get_batch_challenge(v: &[[E::Scalar; 3]], transcript: &mut <E as Engine>::TE) -> E::Scalar {
831    transcript.absorb(
832      b"v",
833      &v.iter()
834        .flatten()
835        .cloned()
836        .collect::<Vec<E::Scalar>>()
837        .as_slice(),
838    );
839
840    transcript.squeeze(b"r").unwrap()
841  }
842
843  fn batch_challenge_powers(q: E::Scalar, k: usize) -> Vec<E::Scalar> {
844    // Compute powers of q : (1, q, q^2, ..., q^(k-1))
845    let mut q_powers = vec![E::Scalar::ONE; k];
846    for i in 1..k {
847      q_powers[i] = q_powers[i - 1] * q;
848    }
849    q_powers
850  }
851
852  fn verifier_second_challenge(W: &[G1Affine<E>], transcript: &mut <E as Engine>::TE) -> E::Scalar {
853    transcript.absorb(b"W", &W.to_vec().as_slice());
854
855    transcript.squeeze(b"d").unwrap()
856  }
857}
858
859impl<E> EvaluationEngineTrait<E> for EvaluationEngine<E>
860where
861  E: Engine<CE = CommitmentEngine<E>>,
862  E::GE: PairingGroup,
863{
864  type EvaluationArgument = EvaluationArgument<E>;
865  type ProverKey = ProverKey<E>;
866  type VerifierKey = VerifierKey<E>;
867
868  fn setup(
869    ck: &<E::CE as CommitmentEngineTrait<E>>::CommitmentKey,
870  ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> {
871    let pk = ProverKey {
872      _p: Default::default(),
873    };
874
875    let vk = VerifierKey {
876      G: E::GE::gen().affine(),
877      H: <<E::GE as PairingGroup>::G2 as DlogGroup>::gen().affine(),
878      tau_H: ck.tau_H,
879    };
880
881    Ok((pk, vk))
882  }
883
884  fn prove(
885    ck: &CommitmentKey<E>,
886    _pk: &Self::ProverKey,
887    transcript: &mut <E as Engine>::TE,
888    _C: &Commitment<E>,
889    hat_P: &[E::Scalar],
890    point: &[E::Scalar],
891    _eval: &E::Scalar,
892  ) -> Result<Self::EvaluationArgument, NovaError> {
893    let x: Vec<E::Scalar> = point.to_vec();
894
895    //////////////// begin helper closures //////////
896    let kzg_open = |f: &[E::Scalar], u: E::Scalar| -> G1Affine<E> {
897      // Divides polynomial f(x) by (x - u) to obtain the witness polynomial h(x) = f(x)/(x - u)
898      // for KZG opening.
899      //
900      // This implementation uses a chunking strategy to enable parallelization:
901      // - Divides the polynomial into chunks
902      // - Processes chunks in parallel using Rayon's par_chunks_exact_mut
903      // - Within each chunk, maintains a "running" partial result
904      // - Combines results across chunk boundaries
905      //
906      // While this adds more total computation than the standard Horner's method,
907      // the parallel execution provides significant speedup for large polynomials.
908      //
909      // Original sequential algorithm using Horner's method:
910      // ```
911      // let mut h = vec![E::Scalar::ZERO; d];
912      // for i in (1..d).rev() {
913      //   h[i - 1] = f[i] + h[i] * u;
914      // }
915      // ```
916      //
917      // The resulting polynomial h(x) satisfies: f(x) = h(x) * (x - u)
918      // The degree of h(x) is one less than the degree of f(x).
919      let div_by_monomial =
920        |f: &[E::Scalar], u: E::Scalar, target_chunks: usize| -> Vec<E::Scalar> {
921          assert!(!f.is_empty());
922          let target_chunk_size = f.len() / target_chunks;
923          let log2_chunk_size = target_chunk_size.max(1).ilog2();
924          let chunk_size = 1 << log2_chunk_size;
925
926          let u_to_the_chunk_size = (0..log2_chunk_size).fold(u, |u_pow, _| u_pow.square());
927          let mut result = f.to_vec();
928          result
929            .par_chunks_mut(chunk_size)
930            .zip(f.par_chunks(chunk_size))
931            .for_each(|(chunk, f_chunk)| {
932              for i in (0..chunk.len() - 1).rev() {
933                chunk[i] = f_chunk[i] + u * chunk[i + 1];
934              }
935            });
936
937          let mut iter = result.chunks_mut(chunk_size).rev();
938          if let Some(last_chunk) = iter.next() {
939            let mut prev_partial = last_chunk[0];
940            for chunk in iter {
941              prev_partial = chunk[0] + u_to_the_chunk_size * prev_partial;
942              chunk[0] = prev_partial;
943            }
944          }
945
946          result[1..]
947            .par_chunks_exact_mut(chunk_size)
948            .rev()
949            .for_each(|chunk| {
950              let mut prev_partial = chunk[chunk_size - 1];
951              for e in chunk.iter_mut().rev().skip(1) {
952                prev_partial *= u;
953                *e += prev_partial;
954              }
955            });
956          result[1..].to_vec()
957        };
958
959      let target_chunks = DEFAULT_TARGET_CHUNKS;
960      let h = &div_by_monomial(f, u, target_chunks);
961
962      E::CE::commit(ck, h, &E::Scalar::ZERO).comm.affine()
963    };
964
965    let kzg_open_batch = |f: &[Vec<E::Scalar>],
966                          u: &[E::Scalar; 3],
967                          transcript: &mut <E as Engine>::TE|
968     -> (Vec<G1Affine<E>>, Vec<[E::Scalar; 3]>) {
969      let poly_eval = |f: &[E::Scalar], u: E::Scalar| -> E::Scalar {
970        // Horner's method
971        let mut acc = E::Scalar::ZERO;
972        for &fi in f.iter().rev() {
973          acc = acc * u + fi;
974        }
975
976        acc
977      };
978
979      let scalar_vector_muladd = |a: &mut Vec<E::Scalar>, v: &Vec<E::Scalar>, s: E::Scalar| {
980        assert!(a.len() >= v.len());
981        a.par_iter_mut().zip(v.par_iter()).for_each(|(a_i, v_i)| {
982          *a_i += s * *v_i;
983        });
984      };
985
986      let kzg_compute_batch_polynomial = |f: &[Vec<E::Scalar>], q: E::Scalar| -> Vec<E::Scalar> {
987        let k = f.len(); // Number of polynomials we're batching
988
989        let q_powers = Self::batch_challenge_powers(q, k);
990
991        // Compute B(x) = f[0] + q*f[1] + q^2 * f[2] + ... q^(k-1) * f[k-1]
992        let mut B = f[0].clone();
993        for i in 1..k {
994          scalar_vector_muladd(&mut B, &f[i], q_powers[i]); // B += q_powers[i] * f[i]
995        }
996
997        B
998      };
999      ///////// END kzg_open_batch closure helpers
1000
1001      let k = f.len();
1002      // Note: u.len() is always 3.
1003
1004      // The verifier needs f_i(u_j), so we compute them here
1005      // (V will compute B(u_j) itself)
1006      let mut v = vec![[E::Scalar::ZERO; 3]; k];
1007      v.par_iter_mut().zip_eq(f).for_each(|(v_j, f)| {
1008        // for each poly f
1009        // for each poly f (except the last one - since it is constant)
1010        v_j.par_iter_mut().enumerate().for_each(|(i, v_ij)| {
1011          // for each point u
1012          *v_ij = poly_eval(f, u[i]);
1013        });
1014      });
1015
1016      let q = Self::get_batch_challenge(&v, transcript);
1017      let B = kzg_compute_batch_polynomial(f, q);
1018
1019      // Now open B at u0, ..., u_{t-1}
1020      let w = u
1021        .into_par_iter()
1022        .map(|ui| kzg_open(&B, *ui))
1023        .collect::<Vec<G1Affine<E>>>();
1024
1025      // The prover computes the challenge to keep the transcript in the same
1026      // state as that of the verifier
1027      let _d_0 = Self::verifier_second_challenge(&w, transcript);
1028
1029      (w, v)
1030    };
1031
1032    ///// END helper closures //////////
1033
1034    let ell = x.len();
1035    let n = hat_P.len();
1036    assert_eq!(n, 1 << ell); // Below we assume that n is a power of two
1037
1038    // Phase 1  -- create commitments com_1, ..., com_\ell
1039    // We do not compute final Pi (and its commitment) as it is constant and equals to 'eval'
1040    // also known to verifier, so can be derived on its side as well
1041    let mut polys: Vec<Vec<E::Scalar>> = Vec::new();
1042    polys.push(hat_P.to_vec());
1043    for i in 0..ell - 1 {
1044      let Pi_len = polys[i].len() / 2;
1045      let mut Pi = vec![E::Scalar::ZERO; Pi_len];
1046
1047      #[allow(clippy::needless_range_loop)]
1048      Pi.par_iter_mut().enumerate().for_each(|(j, Pi_j)| {
1049        *Pi_j = x[ell - i - 1] * (polys[i][2 * j + 1] - polys[i][2 * j]) + polys[i][2 * j];
1050      });
1051
1052      polys.push(Pi);
1053    }
1054
1055    // We do not need to commit to the first polynomial as it is already committed.
1056    // Compute commitments in parallel
1057    let r = vec![E::Scalar::ZERO; ell - 1];
1058    let com: Vec<G1Affine<E>> = E::CE::batch_commit(ck, &polys[1..], r.as_slice())
1059      .par_iter()
1060      .map(|i| i.comm.affine())
1061      .collect();
1062
1063    // Phase 2
1064    // We do not need to add x to the transcript, because in our context x was obtained from the transcript.
1065    // We also do not need to absorb `C` and `eval` as they are already absorbed by the transcript by the caller
1066    let r = Self::compute_challenge(&com, transcript);
1067    let u = [r, -r, r * r];
1068
1069    // Phase 3 -- create response
1070    let (w, v) = kzg_open_batch(&polys, &u, transcript);
1071
1072    Ok(EvaluationArgument {
1073      com,
1074      w: w.try_into().expect("w should have length 3"),
1075      v,
1076    })
1077  }
1078
  /// A method to verify purported evaluations of a batch of polynomials
  ///
  /// Verifies a HyperKZG evaluation argument `pi` claiming that the multilinear
  /// polynomial committed to in `C` evaluates to `y` at the point `x`.
  ///
  /// The verifier re-derives the challenges (`r`, `q`, `d_0`) from the transcript,
  /// checks the consistency of the claimed per-round evaluations, and finishes
  /// with a single pairing equation over batched openings.
  ///
  /// # Errors
  /// Returns `NovaError::ProofVerifyError` when the proof has inconsistent
  /// lengths, when the per-round evaluations are inconsistent with the folding
  /// relation, or when the final pairing check fails.
  fn verify(
    vk: &Self::VerifierKey,
    transcript: &mut <E as Engine>::TE,
    C: &Commitment<E>,
    x: &[E::Scalar],
    y: &E::Scalar,
    pi: &Self::EvaluationArgument,
  ) -> Result<(), NovaError> {
    // Number of variables of the multilinear polynomial (prove asserts n = 2^ell).
    let ell = x.len();

    // we do not need to add x to the transcript, because in our context x was
    // obtained from the transcript
    let r = Self::compute_challenge(&pi.com, transcript);

    // The three univariate points at which every folded polynomial is opened:
    // r, -r, and r^2 (matching the prover's `u` in phase 2).
    let u = [r, -r, r * r];

    // Setup vectors (Y, ypos, yneg) from pi.v
    // The prover sends `ell` evaluation triples but only `ell - 1` commitments,
    // since the first polynomial is committed by `C` itself.
    if pi.v.len() != ell || pi.com.len() != ell - 1 {
      return Err(NovaError::ProofVerifyError {
        reason: "Invalid lengths of pi.v".to_string(),
      });
    }

    // Check consistency of (Y, ypos, yneg)
    // For each round i, with ypos = P_i(r), yneg = P_i(-r), and Y = P_{i+1}(r^2)
    // (or the claimed evaluation `y` for the final round), the folding relation
    //   2r * Y == r * (1 - x_j) * (ypos + yneg) + x_j * (ypos - yneg)
    // must hold, where x_j = x[ell - i - 1] is the variable folded in round i.
    for i in 0..ell {
      let ypos = pi.v[i][0];
      let yneg = pi.v[i][1];
      // For the last round there is no next polynomial; its evaluation equals `y`.
      let Y = pi.v.get(i + 1).map_or(*y, |v| v[2]);
      if r.double() * Y
        != r * (E::Scalar::ONE - x[ell - i - 1]) * (ypos + yneg) + x[ell - i - 1] * (ypos - yneg)
      {
        return Err(NovaError::ProofVerifyError {
          reason: "Inconsistent (Y, ypos, yneg)".to_string(),
        });
      }
      // Note that we don't make any checks about Y[0] here, but our batching
      // check below requires it
    }

    // Check commitments to (Y, ypos, yneg) are valid

    // vk is hashed in transcript already, so we do not add it here

    // Batching challenge `q` (same derivation as the prover's) combining the
    // `ell` polynomials into one.
    let q = Self::get_batch_challenge(&pi.v, transcript);

    // Second challenge combines the three openings; d_1 = d_0^2 so powers of a
    // single challenge are used (an optimization over two independent challenges).
    let d_0 = Self::verifier_second_challenge(&pi.w, transcript);
    let d_1 = d_0.square();

    // We write a special case for t=3, since this what is required for
    // hyperkzg. Following the paper directly, we must compute:
    // let L0 = C_B - vk.G * B_u[0] + W[0] * u[0];
    // let L1 = C_B - vk.G * B_u[1] + W[1] * u[1];
    // let L2 = C_B - vk.G * B_u[2] + W[2] * u[2];
    // let R0 = -W[0];
    // let R1 = -W[1];
    // let R2 = -W[2];
    // let L = L0 + L1*d_0 + L2*d_1;
    // let R = R0 + R1*d_0 + R2*d_1;
    //
    // We group terms to reduce the number of scalar mults (to seven):
    // In Rust, we could use MSMs for these, and speed up verification.
    //
    // Note, that while computing L, the intermediate computation of C_B together with computing
    // L0, L1, L2 can be replaced by single MSM of C with the powers of q multiplied by (1 + d_0 + d_1)
    // with additionally concatenated inputs for scalars/bases.

    let q_power_multiplier = E::Scalar::ONE + d_0 + d_1;

    // q^i * (1 + d_0 + d_1) for i in 0..ell; these scale C and pi.com in the MSM.
    let q_powers_multiplied: Vec<E::Scalar> =
      iter::successors(Some(q_power_multiplier), |qi| Some(*qi * q))
        .take(ell)
        .collect();

    // Compute the batched openings
    // compute B(u_i) = v[i][0] + q*v[i][1] + ... + q^(t-1) * v[i][t-1]
    // (Horner's rule over the triples in pi.v, once for each of the 3 points.)
    let B_u = (0..3)
      .into_par_iter()
      .map(|i| {
        pi.v
          .iter()
          .rev()
          .fold(E::Scalar::ZERO, |acc, v_j| acc * q + v_j[i])
      })
      .collect::<Vec<E::Scalar>>();

    // Left side of the pairing check as a single MSM:
    // bases are [C, pi.com..., pi.w..., vk.G] with the matching grouped scalars.
    let L = E::GE::vartime_multiscalar_mul(
      &[
        &q_powers_multiplied[..],
        &[
          u[0],
          (u[1] * d_0),
          (u[2] * d_1),
          -(B_u[0] + d_0 * B_u[1] + d_1 * B_u[2]),
        ],
      ]
      .concat(),
      &[
        &[C.comm.affine()][..],
        &pi.com,
        &pi.w,
        slice::from_ref(&vk.G),
      ]
      .concat(),
    );

    // Right side: the d-weighted combination of the three opening witnesses.
    // (The negation from the paper is absorbed by swapping the pairing sides below.)
    let R0 = E::GE::group(&pi.w[0]);
    let R1 = E::GE::group(&pi.w[1]);
    let R2 = E::GE::group(&pi.w[2]);
    let R = R0 + R1 * d_0 + R2 * d_1;

    // Check that e(L, vk.H) == e(R, vk.tau_H)
    if (E::GE::pairing(&L, &DlogGroup::group(&vk.H)))
      != (E::GE::pairing(&R, &DlogGroup::group(&vk.tau_H)))
    {
      return Err(NovaError::ProofVerifyError {
        reason: "Pairing check failed".to_string(),
      });
    }

    Ok(())
  }
1201}
1202
#[cfg(test)]
mod tests {
  use super::*;
  #[cfg(feature = "io")]
  use crate::provider::hyperkzg;
  use crate::{
    provider::{keccak::Keccak256Transcript, Bn256EngineKZG},
    spartan::polys::multilinear::MultilinearPolynomial,
  };
  use rand::SeedableRng;
  #[cfg(feature = "io")]
  use std::{
    fs::OpenOptions,
    io::{BufReader, BufWriter},
  };

  type E = Bn256EngineKZG;
  type Fr = <E as Engine>::Scalar;

  // End-to-end prove/verify for a fixed small polynomial at several points,
  // including points outside the Boolean hypercube and incorrect evaluations.
  #[test]
  fn test_hyperkzg_eval() {
    // Test with poly(X1, X2) = 1 + X1 + X2 + X1*X2
    let n = 4;
    let ck: CommitmentKey<E> = CommitmentEngine::setup(b"test", n).unwrap();
    let (pk, vk): (ProverKey<E>, VerifierKey<E>) = EvaluationEngine::setup(&ck).unwrap();

    // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)]
    let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)];

    // Commit with a zero blinding factor.
    let C = CommitmentEngine::commit(&ck, &poly, &<E as Engine>::Scalar::ZERO);

    // Proves and verifies a single (point, eval) claim with fresh transcripts
    // on both sides (so prover and verifier transcript states match).
    let test_inner = |point: Vec<Fr>, eval: Fr| -> Result<(), NovaError> {
      let mut tr = Keccak256Transcript::new(b"TestEval");
      let proof = EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).unwrap();
      let mut tr = Keccak256Transcript::new(b"TestEval");
      EvaluationEngine::verify(&vk, &mut tr, &C, &point, &eval, &proof)
    };

    // Correct (point, eval) pairs: the prover does not recompute the evaluation,
    // but since these evals are consistent with poly, the proofs must verify.
    let point = vec![Fr::from(0), Fr::from(0)];
    let eval = Fr::ONE;
    assert!(test_inner(point, eval).is_ok());

    let point = vec![Fr::from(0), Fr::from(1)];
    let eval = Fr::from(2);
    assert!(test_inner(point, eval).is_ok());

    let point = vec![Fr::from(1), Fr::from(1)];
    let eval = Fr::from(4);
    assert!(test_inner(point, eval).is_ok());

    // Points off the Boolean hypercube still verify when eval matches the
    // multilinear extension of poly.
    let point = vec![Fr::from(0), Fr::from(2)];
    let eval = Fr::from(3);
    assert!(test_inner(point, eval).is_ok());

    let point = vec![Fr::from(2), Fr::from(2)];
    let eval = Fr::from(9);
    assert!(test_inner(point, eval).is_ok());

    // Try a couple incorrect evaluations and expect failure
    let point = vec![Fr::from(2), Fr::from(2)];
    let eval = Fr::from(50);
    assert!(test_inner(point, eval).is_err());

    let point = vec![Fr::from(0), Fr::from(2)];
    let eval = Fr::from(4);
    assert!(test_inner(point, eval).is_err());
  }

  // Checks prove/verify round-trip, transcript-state agreement, the serialized
  // proof size, and rejection of a tampered proof.
  #[test]
  #[cfg(not(feature = "evm"))]
  fn test_hyperkzg_small() {
    let n = 4;

    // poly = [1, 2, 1, 4]
    let poly = vec![Fr::ONE, Fr::from(2), Fr::from(1), Fr::from(4)];

    // point = [4,3]
    let point = vec![Fr::from(4), Fr::from(3)];

    // eval = 28
    let eval = Fr::from(28);

    let ck: CommitmentKey<E> = CommitmentEngine::setup(b"test", n).unwrap();
    let (pk, vk) = EvaluationEngine::setup(&ck).unwrap();

    // make a commitment
    let C = CommitmentEngine::commit(&ck, &poly, &<E as Engine>::Scalar::ZERO);

    // prove an evaluation
    let mut prover_transcript = Keccak256Transcript::new(b"TestEval");
    let proof =
      EvaluationEngine::<E>::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval)
        .unwrap();
    // Squeeze a challenge after proving so we can compare transcript states below.
    let post_c_p = prover_transcript.squeeze(b"c").unwrap();

    // verify the evaluation
    let mut verifier_transcript = Keccak256Transcript::new(b"TestEval");
    assert!(
      EvaluationEngine::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof).is_ok()
    );
    let post_c_v = verifier_transcript.squeeze(b"c").unwrap();

    // check if the prover transcript and verifier transcript are kept in the same state
    assert_eq!(post_c_p, post_c_v);

    let config = bincode::config::legacy()
      .with_big_endian()
      .with_fixed_int_encoding();
    let proof_bytes =
      bincode::serde::encode_to_vec(&proof, config).expect("Failed to serialize proof");

    // NOTE: this test is gated with #[cfg(not(feature = "evm"))], so only the
    // 336-byte branch is reachable here; the 464 value documents the evm layout.
    assert_eq!(
      proof_bytes.len(),
      if cfg!(feature = "evm") { 464 } else { 336 }
    );

    // Change the proof and expect verification to fail
    let mut bad_proof = proof.clone();
    let v1 = bad_proof.v[1];
    bad_proof.v[0].clone_from(&v1);
    let mut verifier_transcript2 = Keccak256Transcript::new(b"TestEval");
    assert!(EvaluationEngine::verify(
      &vk,
      &mut verifier_transcript2,
      &C,
      &point,
      &eval,
      &bad_proof
    )
    .is_err());
  }

  #[test]
  fn test_hyperkzg_large() {
    // test the hyperkzg prover and verifier with random instances (derived from a seed)
    for ell in [4, 5, 6] {
      // Seed the RNG with ell so each instance is deterministic and distinct.
      let mut rng = rand::rngs::StdRng::seed_from_u64(ell as u64);

      let n = 1 << ell; // n = 2^ell

      let poly = (0..n).map(|_| Fr::random(&mut rng)).collect::<Vec<_>>();
      let point = (0..ell).map(|_| Fr::random(&mut rng)).collect::<Vec<_>>();
      let eval = MultilinearPolynomial::evaluate_with(&poly, &point);

      let ck: CommitmentKey<E> = CommitmentEngine::setup(b"test", n).unwrap();
      let (pk, vk) = EvaluationEngine::setup(&ck).unwrap();

      // make a commitment
      let C = CommitmentEngine::commit(&ck, &poly, &<E as Engine>::Scalar::ZERO);

      // prove an evaluation
      let mut prover_transcript = Keccak256Transcript::new(b"TestEval");
      let proof: EvaluationArgument<E> =
        EvaluationEngine::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval)
          .unwrap();

      // verify the evaluation
      let mut verifier_tr = Keccak256Transcript::new(b"TestEval");
      assert!(EvaluationEngine::verify(&vk, &mut verifier_tr, &C, &point, &eval, &proof).is_ok());

      // Change the proof and expect verification to fail
      let mut bad_proof = proof.clone();
      let v1 = bad_proof.v[1];
      bad_proof.v[0].clone_from(&v1);
      let mut verifier_tr2 = Keccak256Transcript::new(b"TestEval");
      assert!(
        EvaluationEngine::verify(&vk, &mut verifier_tr2, &C, &point, &eval, &bad_proof).is_err()
      );
    }
  }

  // Same as test_hyperkzg_large but loads the commitment key from a local
  // perpetual-powers-of-tau file; requires the file to exist, hence #[ignore].
  #[cfg(feature = "io")]
  #[ignore = "only available with external ptau files"]
  #[test]
  fn test_hyperkzg_large_from_file() {
    // test the hyperkzg prover and verifier with random instances (derived from a seed)
    for ell in [4, 5, 6] {
      let mut rng = rand::rngs::StdRng::seed_from_u64(ell as u64);

      let n = 1 << ell; // n = 2^ell

      let poly = (0..n).map(|_| Fr::random(&mut rng)).collect::<Vec<_>>();
      let point = (0..ell).map(|_| Fr::random(&mut rng)).collect::<Vec<_>>();
      let eval = MultilinearPolynomial::evaluate_with(&poly, &point);

      let mut reader = BufReader::new(std::fs::File::open("/tmp/ppot_0080_13.ptau").unwrap());

      let ck: CommitmentKey<E> = CommitmentEngine::load_setup(&mut reader, b"test", n).unwrap();
      let (pk, vk) = EvaluationEngine::setup(&ck).unwrap();

      // make a commitment
      let C = CommitmentEngine::commit(&ck, &poly, &<E as Engine>::Scalar::ZERO);

      // prove an evaluation
      let mut prover_transcript = Keccak256Transcript::new(b"TestEval");
      let proof: EvaluationArgument<E> =
        EvaluationEngine::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval)
          .unwrap();

      // verify the evaluation
      let mut verifier_tr = Keccak256Transcript::new(b"TestEval");
      assert!(EvaluationEngine::verify(&vk, &mut verifier_tr, &C, &point, &eval, &proof).is_ok());

      // Change the proof and expect verification to fail
      let mut bad_proof = proof.clone();
      let v1 = bad_proof.v[1];
      bad_proof.v[0].clone_from(&v1);
      let mut verifier_tr2 = Keccak256Transcript::new(b"TestEval");
      assert!(
        EvaluationEngine::verify(&vk, &mut verifier_tr2, &C, &point, &eval, &bad_proof).is_err()
      );
    }
  }

  // Checks that the direct-tau and fixed-base-exponentiation key-generation
  // paths produce identical commitment keys.
  #[test]
  fn test_key_gen() {
    let n = 100;
    let tau = Fr::random(OsRng);
    let powers_of_tau = CommitmentKey::<E>::compute_powers_serial(tau, n);
    let label = b"test";
    let res1 = CommitmentKey::<E>::setup_from_tau_direct(label, &powers_of_tau, tau);
    let res2 = CommitmentKey::<E>::setup_from_tau_fixed_base_exp(label, &powers_of_tau);

    assert_eq!(res1.ck.len(), res2.ck.len());
    assert_eq!(res1.h, res2.h);
    assert_eq!(res1.tau_H, res2.tau_H);
    for i in 0..res1.ck.len() {
      assert_eq!(res1.ck[i], res2.ck[i]);
    }
  }

  // Round-trips a commitment key through save_setup/load_setup on disk and
  // checks every component survives unchanged.
  #[cfg(feature = "io")]
  #[test]
  fn test_save_load_ck() {
    const BUFFER_SIZE: usize = 64 * 1024;
    const LABEL: &[u8] = b"test";

    let n = 4;
    let filename = "/tmp/kzg_test.ptau";

    let ck: CommitmentKey<E> = CommitmentEngine::setup(LABEL, n).unwrap();

    let file = OpenOptions::new()
      .write(true)
      .create(true)
      .truncate(true)
      .open(filename)
      .unwrap();
    let mut writer = BufWriter::with_capacity(BUFFER_SIZE, file);

    CommitmentEngine::save_setup(&ck, &mut writer).unwrap();

    let file = OpenOptions::new().read(true).open(filename).unwrap();

    let mut reader = BufReader::new(file);

    let read_ck =
      hyperkzg::CommitmentEngine::<E>::load_setup(&mut reader, LABEL, ck.ck.len()).unwrap();

    assert_eq!(ck.ck.len(), read_ck.ck.len());
    assert_eq!(ck.h, read_ck.h);
    assert_eq!(ck.tau_H, read_ck.tau_H);
    for i in 0..ck.ck.len() {
      assert_eq!(ck.ck[i], read_ck.ck[i]);
    }
  }

  // Sanity-checks a loaded ptau key via random pairing identities:
  // e(g1 * x, tau_H) == e(g1, tau_H) * x for random x.
  // Requires an external ptau file, hence #[ignore].
  #[cfg(feature = "io")]
  #[ignore = "only available with external ptau files"]
  #[test]
  fn test_load_ptau() {
    let filename = "/tmp/ppot_0080_13.ptau";
    let file = OpenOptions::new().read(true).open(filename).unwrap();

    let mut reader = BufReader::new(file);

    let ck = hyperkzg::CommitmentEngine::<E>::load_setup(&mut reader, b"test", 1).unwrap();

    let mut rng = rand::thread_rng();

    let gen_g1 = ck.ck[0];
    let t_g2 = ck.tau_H;

    for _ in 0..1000 {
      let x = Fr::from(<rand::rngs::ThreadRng as rand::Rng>::gen::<u64>(&mut rng));
      let x = x * x * x * x;

      let left = halo2curves::bn256::G1::pairing(&(gen_g1 * x), &t_g2.into());
      let right = halo2curves::bn256::G1::pairing(&gen_g1.into(), &t_g2.into()) * x;

      assert_eq!(left, right);
    }
  }
}
1499
#[cfg(test)]
mod evm_tests {
  use super::*;
  use crate::provider::Bn256EngineKZG;

  /// Serializes a default `Commitment` with the legacy bincode config and
  /// checks the byte length matches the expected layout (64 bytes with the
  /// `evm` feature, 32 bytes without it).
  #[test]
  fn test_commitment_evm_serialization() {
    type E = Bn256EngineKZG;

    let commitment = Commitment::<E>::default();
    let encoded = bincode::serde::encode_to_vec(commitment, bincode::config::legacy()).unwrap();

    println!(
      "Commitment serialized length in nova-snark: {} bytes",
      encoded.len()
    );
    // Show at most the first 64 bytes for debugging.
    let preview = std::cmp::min(64, encoded.len());
    println!("Commitment hex: {}", hex::encode(&encoded[..preview]));

    // Expect 64 bytes for EVM feature, else 32 bytes
    let expected = if cfg!(feature = "evm") { 64 } else { 32 };
    assert_eq!(
      encoded.len(),
      expected,
      "Commitment serialization length mismatch"
    );
  }
}
1528}