Skip to main content

nova_snark/provider/
hyperkzg.rs

1//! This module implements Nova's evaluation engine using `HyperKZG`, a KZG-based polynomial commitment for multilinear polynomials
2//! HyperKZG is based on the transformation from univariate PCS to multilinear PCS in the Gemini paper (section 2.4.2 in <https://eprint.iacr.org/2022/420.pdf>).
3//! However, there are some key differences:
4//! (1) HyperKZG works with multilinear polynomials represented in evaluation form (rather than in coefficient form in Gemini's transformation).
5//! This means that Spartan's polynomial IOP can use commit to its polynomials as-is without incurring any interpolations or FFTs.
6//! (2) HyperKZG is specialized to use KZG as the univariate commitment scheme, so it includes several optimizations (both during the transformation of multilinear-to-univariate claims
7//! and within the KZG commitment scheme implementation itself).
8#![allow(non_snake_case)]
9#[cfg(feature = "io")]
10use crate::provider::{ptau::PtauFileError, read_ptau, write_ptau};
11use crate::{
12  errors::NovaError,
13  gadgets::utils::to_bignat_repr,
14  provider::{
15    msm::batch_add,
16    traits::{DlogGroup, DlogGroupExt, PairingGroup},
17  },
18  traits::{
19    commitment::{CommitmentEngineTrait, CommitmentTrait, Len},
20    evaluation::EvaluationEngineTrait,
21    evm_serde::EvmCompatSerde,
22    AbsorbInRO2Trait, AbsorbInROTrait, Engine, Group, ROTrait, TranscriptEngineTrait,
23    TranscriptReprTrait,
24  },
25};
26use core::{
27  iter,
28  marker::PhantomData,
29  ops::Range,
30  ops::{Add, Mul, MulAssign},
31  slice,
32};
33use ff::Field;
34#[cfg(any(test, feature = "test-utils"))]
35use ff::PrimeFieldBits;
36use num_integer::Integer;
37use num_traits::ToPrimitive;
38#[cfg(any(test, feature = "test-utils"))]
39use rand_core::OsRng;
40use rayon::prelude::*;
41use serde::{Deserialize, Serialize};
42use serde_with::serde_as;
43
/// Alias to points on G1 that are in preprocessed form
type G1Affine<E> = <<E as Engine>::GE as DlogGroup>::AffineGroupElement;

/// Alias to points on G2 that are in preprocessed form
type G2Affine<E> = <<<E as Engine>::GE as PairingGroup>::G2 as DlogGroup>::AffineGroupElement;

/// Default number of target chunks used in splitting up polynomial division in the kzg_open closure
const DEFAULT_TARGET_CHUNKS: usize = 1 << 10;
52
/// KZG commitment key
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CommitmentKey<E: Engine>
where
  E::GE: PairingGroup,
{
  // G1 generators: [g * tau^0, g * tau^1, ...]; used as MSM bases when committing
  ck: Vec<<E::GE as DlogGroup>::AffineGroupElement>,
  // Blinding generator (derived from a label); multiplied by the randomness `r` in commitments
  h: <E::GE as DlogGroup>::AffineGroupElement,
  tau_H: <<E::GE as PairingGroup>::G2 as DlogGroup>::AffineGroupElement, // needed only for the verifier key
}
63
64impl<E: Engine> CommitmentKey<E>
65where
66  E::GE: PairingGroup,
67{
68  /// Create a new commitment key
69  pub fn new(
70    ck: Vec<<E::GE as DlogGroup>::AffineGroupElement>,
71    h: <E::GE as DlogGroup>::AffineGroupElement,
72    tau_H: <<E::GE as PairingGroup>::G2 as DlogGroup>::AffineGroupElement,
73  ) -> Self {
74    Self { ck, h, tau_H }
75  }
76
77  /// Returns a reference to the ck field
78  pub fn ck(&self) -> &[<E::GE as DlogGroup>::AffineGroupElement] {
79    &self.ck
80  }
81
82  /// Returns a reference to the h field
83  pub fn h(&self) -> &<E::GE as DlogGroup>::AffineGroupElement {
84    &self.h
85  }
86
87  /// Returns a reference to the tau_H field
88  pub fn tau_H(&self) -> &<<E::GE as PairingGroup>::G2 as DlogGroup>::AffineGroupElement {
89    &self.tau_H
90  }
91
92  /// Returns the coordinates of the generator points.
93  ///
94  /// # Panics
95  ///
96  /// Panics if any generator point is the point at infinity.
97  pub fn to_coordinates(&self) -> Vec<(E::Base, E::Base)> {
98    self
99      .ck
100      .par_iter()
101      .map(|c| {
102        let (x, y, is_infinity) = <E::GE as DlogGroup>::group(c).to_coordinates();
103        assert!(!is_infinity);
104        (x, y)
105      })
106      .collect()
107  }
108}
109
110impl<E: Engine> Len for CommitmentKey<E>
111where
112  E::GE: PairingGroup,
113{
114  fn length(&self) -> usize {
115    self.ck.len()
116  }
117}
118
/// A type that holds blinding generator
#[serde_as]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct DerandKey<E: Engine>
where
  E::GE: DlogGroup,
{
  // The blinding generator copied out of the commitment key; used by `derandomize`
  // to subtract the `h * r` hiding term from a commitment.
  #[serde_as(as = "EvmCompatSerde")]
  h: <E::GE as DlogGroup>::AffineGroupElement,
}
130
/// A KZG commitment
#[serde_as]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct Commitment<E: Engine>
where
  E::GE: PairingGroup,
{
  // The commitment is a single G1 group element
  #[serde_as(as = "EvmCompatSerde")]
  comm: E::GE,
}
142
143impl<E: Engine> Commitment<E>
144where
145  E::GE: PairingGroup,
146{
147  /// Creates a new commitment from the underlying group element
148  pub fn new(comm: E::GE) -> Self {
149    Commitment { comm }
150  }
151  /// Returns the commitment as a group element
152  pub fn into_inner(self) -> E::GE {
153    self.comm
154  }
155}
156
impl<E: Engine> CommitmentTrait<E> for Commitment<E>
where
  E::GE: PairingGroup,
{
  // Delegates to the group element: returns (x, y, is_infinity).
  fn to_coordinates(&self) -> (E::Base, E::Base, bool) {
    self.comm.to_coordinates()
  }
}
165
166impl<E: Engine> Default for Commitment<E>
167where
168  E::GE: PairingGroup,
169{
170  fn default() -> Self {
171    Commitment {
172      comm: E::GE::zero(),
173    }
174  }
175}
176
impl<E: Engine> TranscriptReprTrait<E::GE> for Commitment<E>
where
  E::GE: PairingGroup,
{
  // Serializes the commitment into transcript bytes. The encoding depends on the
  // curve's B parameter: when B != 0 the point at infinity is encoded as (0, 0);
  // when B == 0 an explicit flag byte is appended.
  fn to_transcript_bytes(&self) -> Vec<u8> {
    use crate::traits::Group;
    let (x, y, is_infinity) = self.comm.to_coordinates();
    // Get curve parameter B to determine encoding strategy
    let (_, b, _, _) = E::GE::group_params();

    if b != E::Base::ZERO {
      // For curves with B!=0 (like BN254 with B=3, Grumpkin with B=-5),
      // (0, 0) doesn't lie on the curve (since 0 != 0 + 0 + B),
      // so point at infinity can be safely encoded as (0, 0).
      let (x, y) = if is_infinity {
        (E::Base::ZERO, E::Base::ZERO)
      } else {
        (x, y)
      };
      [x.to_transcript_bytes(), y.to_transcript_bytes()].concat()
    } else {
      // For curves with B=0, (0, 0) lies on the curve, so we need the is_infinity flag.
      // NOTE: the flag byte is 1 when the point is NOT at infinity (it encodes !is_infinity).
      let is_infinity_byte = (!is_infinity).into();
      [
        x.to_transcript_bytes(),
        y.to_transcript_bytes(),
        [is_infinity_byte].to_vec(),
      ]
      .concat()
    }
  }
}
209
impl<E: Engine> AbsorbInROTrait<E> for Commitment<E>
where
  E::GE: PairingGroup,
{
  // Absorbs the commitment's coordinates into the random oracle.
  // The absorb order (x, then y, then optionally the infinity flag) must match
  // the in-circuit absorption; do not reorder.
  fn absorb_in_ro(&self, ro: &mut E::RO) {
    let (x, y, is_infinity) = self.comm.to_coordinates();
    // When B != 0 (true for BN254, Grumpkin, etc.), (0,0) is not on the curve
    // so we can use it as a canonical representation for infinity.
    let (_, b, _, _) = E::GE::group_params();
    if b != E::Base::ZERO {
      let (x, y) = if is_infinity {
        (E::Base::ZERO, E::Base::ZERO)
      } else {
        (x, y)
      };
      ro.absorb(x);
      ro.absorb(y);
    } else {
      // B == 0: (0,0) is a valid curve point, so absorb an explicit infinity flag.
      ro.absorb(x);
      ro.absorb(y);
      ro.absorb(if is_infinity {
        E::Base::ONE
      } else {
        E::Base::ZERO
      });
    }
  }
}
238
impl<E: Engine> AbsorbInRO2Trait<E> for Commitment<E>
where
  E::GE: PairingGroup,
{
  // Absorbs the commitment into the second random oracle, whose native field is
  // the scalar field: the base-field coordinates are decomposed into big-num limbs
  // before absorption.
  fn absorb_in_ro2(&self, ro: &mut E::RO2) {
    let (x, y, is_infinity) = self.comm.to_coordinates();
    // When B != 0, use (0,0) for infinity
    let (_, b, _, _) = E::GE::group_params();
    let (x, y) = if b != E::Base::ZERO && is_infinity {
      (E::Base::ZERO, E::Base::ZERO)
    } else {
      (x, y)
    };

    // we have to absorb x and y in big num format
    let limbs_x = to_bignat_repr(&x);
    let limbs_y = to_bignat_repr(&y);

    for limb in limbs_x.iter().chain(limbs_y.iter()) {
      ro.absorb(*limb);
    }
    // Only absorb is_infinity when B == 0 ((0,0) is a valid point on such curves,
    // so the flag is required to disambiguate)
    if b == E::Base::ZERO {
      ro.absorb(if is_infinity {
        E::Scalar::ONE
      } else {
        E::Scalar::ZERO
      });
    }
  }
}
270
271impl<E: Engine> MulAssign<E::Scalar> for Commitment<E>
272where
273  E::GE: PairingGroup,
274{
275  fn mul_assign(&mut self, scalar: E::Scalar) {
276    let result = self.comm * scalar;
277    *self = Commitment { comm: result };
278  }
279}
280
281impl<'b, E: Engine> Mul<&'b E::Scalar> for &'_ Commitment<E>
282where
283  E::GE: PairingGroup,
284{
285  type Output = Commitment<E>;
286
287  fn mul(self, scalar: &'b E::Scalar) -> Commitment<E> {
288    Commitment {
289      comm: self.comm * scalar,
290    }
291  }
292}
293
294impl<E: Engine> Mul<E::Scalar> for Commitment<E>
295where
296  E::GE: PairingGroup,
297{
298  type Output = Commitment<E>;
299
300  fn mul(self, scalar: E::Scalar) -> Commitment<E> {
301    Commitment {
302      comm: self.comm * scalar,
303    }
304  }
305}
306
307impl<E: Engine> Add for Commitment<E>
308where
309  E::GE: PairingGroup,
310{
311  type Output = Commitment<E>;
312
313  fn add(self, other: Commitment<E>) -> Commitment<E> {
314    Commitment {
315      comm: self.comm + other.comm,
316    }
317  }
318}
319
/// Provides a commitment engine
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct CommitmentEngine<E: Engine> {
  // Zero-sized marker tying the engine to a concrete `Engine` instantiation
  _p: PhantomData<E>,
}
325
/// Test-only methods for generating commitment keys with known tau.
/// These methods are insecure for production use - use `load_setup` with ptau files instead.
#[cfg(any(test, feature = "test-utils"))]
impl<E: Engine> CommitmentKey<E>
where
  E::GE: PairingGroup,
{
  /// NOTE: this is for testing purposes and should not be used in production.
  /// In production, use [`PublicParams::setup_with_ptau_dir`] or
  /// [`R1CSShape::commitment_key_from_ptau_dir`] with ptau files from a trusted setup ceremony.
  ///
  /// This generates a commitment key with a random tau value. Since the caller
  /// (or anyone with access to the RNG state) knows tau, this is insecure.
  pub fn setup_from_rng(label: &'static [u8], n: usize, rng: impl rand_core::RngCore) -> Self {
    // Above T1 generators, compute the powers of tau in parallel.
    const T1: usize = 1 << 16;
    // Above T2 generators, use the fixed-base comb method instead of direct scalar muls.
    const T2: usize = 100_000;

    let num_gens = n.next_power_of_two();

    let tau = E::Scalar::random(rng);

    let powers_of_tau = if num_gens < T1 {
      Self::compute_powers_serial(tau, num_gens)
    } else {
      Self::compute_powers_par(tau, num_gens)
    };

    if num_gens < T2 {
      Self::setup_from_tau_direct(label, &powers_of_tau, tau)
    } else {
      Self::setup_from_tau_fixed_base_exp(label, &powers_of_tau)
    }
  }

  /// Builds the key using batched fixed-base comb exponentiation; faster than
  /// per-generator scalar multiplication when there are many generators.
  fn setup_from_tau_fixed_base_exp(label: &'static [u8], powers_of_tau: &[E::Scalar]) -> Self {
    let tau = powers_of_tau[1];

    let gen = <E::GE as DlogGroup>::gen();

    // Comb parameters: H=4, 2^H=16, A=64, B=2, V=32 (A = V*B must hold).
    let ck = fixed_base_exp_comb_batch::<4, 16, 64, 2, 32, _>(gen, powers_of_tau);
    let ck = ck.par_iter().map(|p| p.affine()).collect();

    let h = *E::GE::from_label(label, 1).first().unwrap();

    let tau_H = (<<E::GE as PairingGroup>::G2 as DlogGroup>::gen() * tau).affine();

    Self { ck, h, tau_H }
  }

  /// Builds the key by directly multiplying the G1 generator by each power of tau.
  fn setup_from_tau_direct(
    label: &'static [u8],
    powers_of_tau: &[E::Scalar],
    tau: E::Scalar,
  ) -> Self {
    let num_gens = powers_of_tau.len();

    let ck: Vec<G1Affine<E>> = (0..num_gens)
      .into_par_iter()
      .map(|i| (<E::GE as DlogGroup>::gen() * powers_of_tau[i]).affine())
      .collect();

    let h = *E::GE::from_label(label, 1).first().unwrap();

    let tau_H = (<<E::GE as PairingGroup>::G2 as DlogGroup>::gen() * tau).affine();

    Self { ck, h, tau_H }
  }

  /// Sequentially computes [1, tau, tau^2, ..., tau^(n-1)].
  fn compute_powers_serial(tau: E::Scalar, n: usize) -> Vec<E::Scalar> {
    let mut powers_of_tau = Vec::with_capacity(n);
    // push (not insert) — elements are only ever appended at the end
    powers_of_tau.push(E::Scalar::ONE);
    for i in 1..n {
      powers_of_tau.push(powers_of_tau[i - 1] * tau);
    }
    powers_of_tau
  }

  /// Computes [1, tau, ..., tau^(n-1)] in parallel: each chunk seeds itself with
  /// tau^(first index of the chunk) and then extends sequentially within the chunk.
  fn compute_powers_par(tau: E::Scalar, n: usize) -> Vec<E::Scalar> {
    let num_threads = rayon::current_num_threads();
    (0..n)
      .collect::<Vec<_>>()
      // par_chunks already yields a parallel iterator; no into_par_iter needed
      .par_chunks(std::cmp::max(n / num_threads, 1))
      .map(|sub_list| {
        let mut res = Vec::with_capacity(sub_list.len());
        res.push(tau.pow([sub_list[0] as u64]));
        for i in 1..sub_list.len() {
          res.push(res[i - 1] * tau);
        }
        res
      })
      .flatten()
      .collect::<Vec<_>>()
  }
}
421
// * Implementation of https://www.weimerskirch.org/files/Weimerskirch_FixedBase.pdf
// Only used by test-only setup code
//
// Computes gen^e for every scalar e in `scalars`, sharing a precomputed table of
// combinations of gen across all exponentiations. Const parameters (per the paper's
// notation, scalar bits are split into H rows of A bits; each row into V windows of
// B bits): POW_2_H must equal 2^H, and A must equal V*B.
#[cfg(any(test, feature = "test-utils"))]
fn fixed_base_exp_comb_batch<
  const H: usize,
  const POW_2_H: usize,
  const A: usize,
  const B: usize,
  const V: usize,
  G: DlogGroup,
>(
  gen: G,
  scalars: &[G::Scalar],
) -> Vec<G> {
  // Parameter sanity checks; A <= 64 bounds the digit extraction below.
  assert_eq!(1 << H, POW_2_H);
  assert_eq!(A, V * B);
  assert!(A <= 64);

  let zero = G::zero();
  let one = gen;

  // gi[i] = gen^(2^(i*A)): each row's base, obtained by doubling the previous row A times.
  let gi = {
    let mut res = [one; H];
    for i in 1..H {
      // A doublings multiply the exponent by 2^A
      let prod = (0..A).fold(res[i - 1], |acc, _| acc + acc);
      res[i] = prod;
    }
    res
  };

  // Precompute table: for each nonzero bit-pattern i over the H rows,
  // res[0] = G[0][i] (sum of the selected row bases), and res[j] = G[j][i] = G[j-1][i]^(2^B).
  let mut precompute_res = (1..POW_2_H)
    .into_par_iter()
    .map(|i| {
      let mut res = [zero; V];

      // * G[0][i]
      let mut g_0_i = zero;
      for (j, item) in gi.iter().enumerate().take(H) {
        if (1 << j) & i > 0 {
          g_0_i += item;
        }
      }

      res[0] = g_0_i;

      // * G[j][i]: each subsequent window is the previous one doubled B times
      for j in 1..V {
        res[j] = (0..B).fold(res[j - 1], |acc, _| acc + acc);
      }

      res
    })
    .collect::<Vec<_>>();

  // Index 0 (the all-zero pattern) contributes nothing; insert identity so the
  // table can be indexed directly by the bit-pattern.
  precompute_res.insert(0, [zero; V]);

  let precomputed_g: [_; POW_2_H] = std::array::from_fn(|j| precompute_res[j]);

  // (re-binds the identity so the closure below captures it, not the earlier binding)
  let zero = G::zero();

  scalars
    .par_iter()
    .map(|e| {
      let mut a = zero;
      let mut bits = e.to_le_bits().into_iter().collect::<Vec<_>>();

      // Pad the bit-length to a multiple of A so every (row, window, bit) index is valid.
      while bits.len() % A != 0 {
        bits.push(false);
      }

      // Main comb loop: one doubling per bit position k, adding the table entry
      // selected by the H-bit digit i_{j,k} for each window j.
      for k in (0..B).rev() {
        a += a;
        for j in (0..V).rev() {
          // Assemble the H-bit pattern from bit positions h*A + j*B + k across rows.
          let i_j_k = (0..H)
            .map(|h| {
              let b = bits[h * A + j * B + k];
              (1 << h) * b as usize
            })
            .sum::<usize>();

          if i_j_k > 0 {
            a += precomputed_g[i_j_k][j];
          }
        }
      }

      a
    })
    .collect::<Vec<_>>()
}
512
impl<E: Engine> CommitmentEngineTrait<E> for CommitmentEngine<E>
where
  E::GE: PairingGroup,
{
  type Commitment = Commitment<E>;
  type CommitmentKey = CommitmentKey<E>;
  type DerandKey = DerandKey<E>;

  /// Generate a commitment key with a random tau value.
  ///
  /// # Availability
  ///
  /// This method is only available in test builds or when the `test-utils` feature is enabled.
  /// In production builds, this will return an error.
  ///
  /// # Security Warning
  ///
  /// This method generates an **insecure** commitment key using a random tau.
  /// The security of HyperKZG relies on no one knowing the secret tau value.
  ///
  /// **For production use**, call [`PublicParams::setup_with_ptau_dir`] or
  /// [`R1CSShape::commitment_key_from_ptau_dir`] to load commitment keys from
  /// Powers of Tau ceremony files (e.g., from the Ethereum PPOT ceremony).
  ///
  /// # For downstream crates
  ///
  /// If you need access to this method in your tests, add `nova-snark` with the
  /// `test-utils` feature to your `dev-dependencies`:
  ///
  /// ```toml
  /// [dev-dependencies]
  /// nova-snark = { version = "...", features = ["test-utils"] }
  /// ```
  #[cfg(any(test, feature = "test-utils"))]
  fn setup(label: &'static [u8], n: usize) -> Result<Self::CommitmentKey, NovaError> {
    Ok(Self::CommitmentKey::setup_from_rng(label, n, OsRng))
  }

  // Production builds: setup is disabled; keys must come from a trusted setup ceremony.
  #[cfg(not(any(test, feature = "test-utils")))]
  fn setup(_label: &'static [u8], _n: usize) -> Result<Self::CommitmentKey, NovaError> {
    Err(NovaError::SetupError(
      "HyperKZG::setup is disabled in production builds. \
       Use PublicParams::setup_with_ptau_dir or R1CSShape::commitment_key_from_ptau_dir \
       with ptau files from a trusted setup ceremony. \
       For tests, enable the 'test-utils' feature."
        .to_string(),
    ))
  }

  // The derandomization key is just the blinding generator from the commitment key.
  fn derand_key(ck: &Self::CommitmentKey) -> Self::DerandKey {
    Self::DerandKey { h: ck.h }
  }

  // Commits to `v` as an MSM over the first v.len() generators, plus the hiding term h*r.
  fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar], r: &E::Scalar) -> Self::Commitment {
    assert!(ck.ck.len() >= v.len());

    Commitment {
      comm: E::GE::vartime_multiscalar_mul(v, &ck.ck[..v.len()])
        + <E::GE as DlogGroup>::group(&ck.h) * r,
    }
  }

  // Commits to many vectors at once: one batched MSM over the longest-needed prefix
  // of the generators, then each result gets its own hiding term h*r_i.
  fn batch_commit(
    ck: &Self::CommitmentKey,
    v: &[Vec<<E as Engine>::Scalar>],
    r: &[<E as Engine>::Scalar],
  ) -> Vec<Self::Commitment> {
    assert!(v.len() == r.len());

    let max = v.iter().map(|v| v.len()).max().unwrap_or(0);
    assert!(ck.ck.len() >= max);

    let h = <E::GE as DlogGroup>::group(&ck.h);

    E::GE::batch_vartime_multiscalar_mul(v, &ck.ck[..max])
      .par_iter()
      .zip(r.par_iter())
      .map(|(commit, r_i)| Commitment {
        comm: *commit + (h * r_i),
      })
      .collect()
  }

  // Same as `commit`, but for small (u64-representable) scalars, which allows a
  // faster MSM routine.
  fn commit_small<T: Integer + Into<u64> + Copy + Sync + ToPrimitive>(
    ck: &Self::CommitmentKey,
    v: &[T],
    r: &E::Scalar,
  ) -> Self::Commitment {
    assert!(ck.ck.len() >= v.len());
    Commitment {
      comm: E::GE::vartime_multiscalar_mul_small(v, &ck.ck[..v.len()])
        + <E::GE as DlogGroup>::group(&ck.h) * r,
    }
  }

  // Batched variant of `commit_small`; see `batch_commit` for the batching scheme.
  fn batch_commit_small<T: Integer + Into<u64> + Copy + Sync + ToPrimitive>(
    ck: &Self::CommitmentKey,
    v: &[Vec<T>],
    r: &[E::Scalar],
  ) -> Vec<Self::Commitment> {
    assert!(v.len() == r.len());

    let max = v.iter().map(|v| v.len()).max().unwrap_or(0);
    assert!(ck.ck.len() >= max);

    let h = <E::GE as DlogGroup>::group(&ck.h);

    E::GE::batch_vartime_multiscalar_mul_small(v, &ck.ck[..max])
      .iter()
      .zip(r.iter())
      .map(|(commit, r_i)| Commitment {
        comm: *commit + (h * r_i),
      })
      .collect()
  }

  // Removes the hiding term from a commitment: comm - h*r. The inverse of the
  // `+ h * r` applied in the commit methods above.
  fn derandomize(
    dk: &Self::DerandKey,
    commit: &Self::Commitment,
    r: &E::Scalar,
  ) -> Self::Commitment {
    Commitment {
      comm: commit.comm - <E::GE as DlogGroup>::group(&dk.h) * r,
    }
  }

  // Loads a commitment key from a ptau (Powers of Tau ceremony) file; reads the
  // first `num` G1 points and 2 G2 points, taking the last G2 point as tau*H.
  #[cfg(feature = "io")]
  fn load_setup(
    reader: &mut (impl std::io::Read + std::io::Seek),
    label: &'static [u8],
    n: usize,
  ) -> Result<Self::CommitmentKey, PtauFileError> {
    let num = n.next_power_of_two();

    // read points as well as check sanity of ptau file
    let (g1_points, g2_points) = read_ptau(reader, num, 2)?;

    let ck = g1_points.to_vec();

    let tau_H = *g2_points.last().unwrap();

    // The blinding generator is re-derived from the label (it is not stored in ptau files).
    let h = *E::GE::from_label(label, 1).first().unwrap();

    Ok(CommitmentKey { ck, h, tau_H })
  }

  /// Save keys
  #[cfg(feature = "io")]
  fn save_setup(
    ck: &Self::CommitmentKey,
    mut writer: &mut (impl std::io::Write + std::io::Seek),
  ) -> Result<(), PtauFileError> {
    let g1_points = ck.ck.clone();

    // NOTE(review): tau_H is written twice because the key stores only tau*H
    // (not H itself); load_setup reads back only the last G2 point, so the
    // round-trip through this pair of functions is consistent — confirm this
    // matches what write_ptau expects for the two G2 slots.
    let g2_points = vec![ck.tau_H, ck.tau_H];
    // ptau "power" header: log2 of the generator count, +1 per the file format's convention.
    let power = g1_points.len().next_power_of_two().trailing_zeros() + 1;

    write_ptau(&mut writer, g1_points, g2_points, power)
  }

  // Commits to the sub-slice v[range] against the matching generator range, with an
  // MSM specialized for scalars of at most `max_num_bits` bits. Skips the hiding
  // term entirely when r == 0.
  fn commit_small_range<T: Integer + Into<u64> + Copy + Sync + ToPrimitive>(
    ck: &Self::CommitmentKey,
    v: &[T],
    r: &<E as Engine>::Scalar,
    range: Range<usize>,
    max_num_bits: usize,
  ) -> Self::Commitment {
    let bases = &ck.ck[range.clone()];
    let scalars = &v[range];

    assert!(bases.len() == scalars.len());

    let mut res =
      E::GE::vartime_multiscalar_mul_small_with_max_num_bits(scalars, bases, max_num_bits);

    if r != &E::Scalar::ZERO {
      res += <E::GE as DlogGroup>::group(&ck.h) * r;
    }

    Commitment { comm: res }
  }

  fn ck_to_coordinates(ck: &Self::CommitmentKey) -> Vec<(E::Base, E::Base)> {
    ck.to_coordinates()
  }

  // Converts the affine generators back to group elements, asserting none is the identity.
  fn ck_to_group_elements(ck: &Self::CommitmentKey) -> Vec<E::GE> {
    ck.ck()
      .par_iter()
      .map(|g| {
        let ge = E::GE::group(g);
        assert!(
          ge != E::GE::zero(),
          "CommitmentKey contains a generator at infinity"
        );
        ge
      })
      .collect()
  }

  // Commits to a 0/1 vector given only its nonzero indices: a plain sum of the
  // selected generators (no scalar multiplications), plus h*r only when r != 0.
  fn commit_sparse_binary(
    ck: &Self::CommitmentKey,
    non_zero_indices: &[usize],
    r: &<E as Engine>::Scalar,
  ) -> Self::Commitment {
    let comm = batch_add(&ck.ck, non_zero_indices);
    let mut comm = <E::GE as DlogGroup>::group(&comm.into());

    if r != &E::Scalar::ZERO {
      comm += <E::GE as DlogGroup>::group(&ck.h) * r;
    }

    Commitment { comm }
  }
}
728
/// Provides an implementation of generators for proving evaluations
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct ProverKey<E: Engine> {
  // The HyperKZG prover needs no key material beyond the commitment key,
  // so this is a zero-sized marker type.
  _p: PhantomData<E>,
}
735
/// A verifier key
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct VerifierKey<E: Engine>
where
  E::GE: PairingGroup,
{
  // G1 generator
  #[serde_as(as = "EvmCompatSerde")]
  pub(crate) G: G1Affine<E>,
  // G2 generator
  #[serde_as(as = "EvmCompatSerde")]
  pub(crate) H: G2Affine<E>,
  // tau * H, copied from the commitment key; used on the G2 side of the pairing check
  #[serde_as(as = "EvmCompatSerde")]
  pub(crate) tau_H: G2Affine<E>,
}
751
/// Provides an implementation of a polynomial evaluation argument
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "")]
pub struct EvaluationArgument<E: Engine>
where
  E::GE: PairingGroup,
{
  // KZG commitments to the intermediate (folded) polynomials P_1, ..., P_{ell-1}
  #[serde_as(as = "Vec<EvmCompatSerde>")]
  com: Vec<G1Affine<E>>,
  // KZG opening witnesses at the three evaluation points (r, -r, r^2)
  #[serde_as(as = "[EvmCompatSerde; 3]")]
  w: [G1Affine<E>; 3],
  // Claimed evaluations: v[i][j] = f_i(u_j) for each polynomial i and point j
  #[serde_as(as = "Vec<[EvmCompatSerde; 3]>")]
  v: Vec<[E::Scalar; 3]>,
}
767
768impl<E: Engine> EvaluationArgument<E>
769where
770  E::GE: PairingGroup,
771{
772  /// Create a new evaluation argument
773  pub fn new(com: Vec<G1Affine<E>>, w: [G1Affine<E>; 3], v: Vec<[E::Scalar; 3]>) -> Self {
774    Self { com, w, v }
775  }
776  /// The KZG commitments to intermediate polynomials
777  pub fn com(&self) -> &[G1Affine<E>] {
778    &self.com
779  }
780  /// The KZG witnesses for batch openings
781  pub fn w(&self) -> &[G1Affine<E>] {
782    &self.w
783  }
784  /// The evaluations of the polynomials at challenge points
785  pub fn v(&self) -> &[[E::Scalar; 3]] {
786    &self.v
787  }
788}
789
/// Provides an implementation of a polynomial evaluation engine using KZG
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct EvaluationEngine<E: Engine> {
  // Zero-sized marker tying the engine to a concrete `Engine` instantiation
  _p: PhantomData<E>,
}
795
796impl<E: Engine> EvaluationEngine<E>
797where
798  E::GE: PairingGroup,
799{
800  // This impl block defines helper functions that are not a part of
801  // EvaluationEngineTrait, but that we will use to implement the trait methods.
802  fn compute_challenge(com: &[G1Affine<E>], transcript: &mut <E as Engine>::TE) -> E::Scalar {
803    transcript.absorb(b"c", &com.to_vec().as_slice());
804
805    transcript.squeeze(b"c").unwrap()
806  }
807
808  // Compute challenge q = Hash(vk, C0, ..., C_{k-1}, u0, ...., u_{t-1},
809  // (f_i(u_j))_{i=0..k-1,j=0..t-1})
810  fn get_batch_challenge(v: &[[E::Scalar; 3]], transcript: &mut <E as Engine>::TE) -> E::Scalar {
811    transcript.absorb(
812      b"v",
813      &v.iter()
814        .flatten()
815        .cloned()
816        .collect::<Vec<E::Scalar>>()
817        .as_slice(),
818    );
819
820    transcript.squeeze(b"r").unwrap()
821  }
822
823  fn batch_challenge_powers(q: E::Scalar, k: usize) -> Vec<E::Scalar> {
824    // Compute powers of q : (1, q, q^2, ..., q^(k-1))
825    let mut q_powers = vec![E::Scalar::ONE; k];
826    for i in 1..k {
827      q_powers[i] = q_powers[i - 1] * q;
828    }
829    q_powers
830  }
831
832  fn verifier_second_challenge(W: &[G1Affine<E>], transcript: &mut <E as Engine>::TE) -> E::Scalar {
833    transcript.absorb(b"W", &W.to_vec().as_slice());
834
835    transcript.squeeze(b"d").unwrap()
836  }
837}
838
839impl<E> EvaluationEngineTrait<E> for EvaluationEngine<E>
840where
841  E: Engine<CE = CommitmentEngine<E>>,
842  E::GE: PairingGroup,
843{
844  type EvaluationArgument = EvaluationArgument<E>;
845  type ProverKey = ProverKey<E>;
846  type VerifierKey = VerifierKey<E>;
847
848  fn setup(
849    ck: &<E::CE as CommitmentEngineTrait<E>>::CommitmentKey,
850  ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError> {
851    let pk = ProverKey {
852      _p: Default::default(),
853    };
854
855    let vk = VerifierKey {
856      G: E::GE::gen().affine(),
857      H: <<E::GE as PairingGroup>::G2 as DlogGroup>::gen().affine(),
858      tau_H: ck.tau_H,
859    };
860
861    Ok((pk, vk))
862  }
863
  // Produces a HyperKZG evaluation argument showing that the multilinear polynomial
  // committed in `_C` (with evaluations `hat_P` over the Boolean hypercube) evaluates
  // to `_eval` at `point`. `_C` and `_eval` are unused here because the caller has
  // already absorbed them into the transcript.
  fn prove(
    ck: &CommitmentKey<E>,
    _pk: &Self::ProverKey,
    transcript: &mut <E as Engine>::TE,
    _C: &Commitment<E>,
    hat_P: &[E::Scalar],
    point: &[E::Scalar],
    _eval: &E::Scalar,
  ) -> Result<Self::EvaluationArgument, NovaError> {
    let x: Vec<E::Scalar> = point.to_vec();

    //////////////// begin helper closures //////////
    // Computes the KZG opening witness commitment for polynomial f at point u.
    let kzg_open = |f: &[E::Scalar], u: E::Scalar| -> G1Affine<E> {
      // Divides polynomial f(x) by (x - u) to obtain the witness polynomial h(x) = f(x)/(x - u)
      // for KZG opening.
      //
      // This implementation uses a chunking strategy to enable parallelization:
      // - Divides the polynomial into chunks
      // - Processes chunks in parallel using Rayon's par_chunks_exact_mut
      // - Within each chunk, maintains a "running" partial result
      // - Combines results across chunk boundaries
      //
      // While this adds more total computation than the standard Horner's method,
      // the parallel execution provides significant speedup for large polynomials.
      //
      // Original sequential algorithm using Horner's method:
      // ```
      // let mut h = vec![E::Scalar::ZERO; d];
      // for i in (1..d).rev() {
      //   h[i - 1] = f[i] + h[i] * u;
      // }
      // ```
      //
      // The resulting polynomial h(x) satisfies: f(x) = h(x) * (x - u)
      // The degree of h(x) is one less than the degree of f(x).
      let div_by_monomial =
        |f: &[E::Scalar], u: E::Scalar, target_chunks: usize| -> Vec<E::Scalar> {
          assert!(!f.is_empty());
          // Round the chunk size down to a power of two so that u^chunk_size can be
          // computed by repeated squaring below.
          let target_chunk_size = f.len() / target_chunks;
          let log2_chunk_size = target_chunk_size.max(1).ilog2();
          let chunk_size = 1 << log2_chunk_size;

          // u^(chunk_size), via log2_chunk_size squarings of u
          let u_to_the_chunk_size = (0..log2_chunk_size).fold(u, |u_pow, _| u_pow.square());
          let mut result = f.to_vec();
          // Step 1: run Horner's recurrence independently inside each chunk (in parallel);
          // each chunk's entries are partial results that ignore higher chunks.
          result
            .par_chunks_mut(chunk_size)
            .zip(f.par_chunks(chunk_size))
            .for_each(|(chunk, f_chunk)| {
              for i in (0..chunk.len() - 1).rev() {
                chunk[i] = f_chunk[i] + u * chunk[i + 1];
              }
            });

          // Step 2 (sequential): fix up each chunk's first coefficient by carrying the
          // partial result from the chunk above it, scaled by u^chunk_size.
          let mut iter = result.chunks_mut(chunk_size).rev();
          if let Some(last_chunk) = iter.next() {
            let mut prev_partial = last_chunk[0];
            for chunk in iter {
              prev_partial = chunk[0] + u_to_the_chunk_size * prev_partial;
              chunk[0] = prev_partial;
            }
          }

          // Step 3: propagate the corrected boundary values into the interior of each
          // chunk (in parallel). The 1-offset realigns chunks so each chunk's last
          // element is an already-corrected boundary value.
          result[1..]
            .par_chunks_exact_mut(chunk_size)
            .rev()
            .for_each(|chunk| {
              let mut prev_partial = chunk[chunk_size - 1];
              for e in chunk.iter_mut().rev().skip(1) {
                prev_partial *= u;
                *e += prev_partial;
              }
            });
          // Drop the constant-term slot: h has degree one less than f.
          result[1..].to_vec()
        };

      let target_chunks = DEFAULT_TARGET_CHUNKS;
      let h = &div_by_monomial(f, u, target_chunks);

      // Commit to h with zero blinding (the opening witness is not hidden).
      E::CE::commit(ck, h, &E::Scalar::ZERO).comm.affine()
    };

    // Batch-opens all polynomials in `f` at the three points in `u`, returning the
    // three opening witnesses and the claimed evaluations f_i(u_j).
    let kzg_open_batch = |f: &[Vec<E::Scalar>],
                          u: &[E::Scalar; 3],
                          transcript: &mut <E as Engine>::TE|
     -> (Vec<G1Affine<E>>, Vec<[E::Scalar; 3]>) {
      // Evaluates f at u (coefficients in low-to-high order).
      let poly_eval = |f: &[E::Scalar], u: E::Scalar| -> E::Scalar {
        // Horner's method
        let mut acc = E::Scalar::ZERO;
        for &fi in f.iter().rev() {
          acc = acc * u + fi;
        }

        acc
      };

      // a += s * v, elementwise (v may be shorter than a).
      let scalar_vector_muladd = |a: &mut Vec<E::Scalar>, v: &Vec<E::Scalar>, s: E::Scalar| {
        assert!(a.len() >= v.len());
        a.par_iter_mut().zip(v.par_iter()).for_each(|(a_i, v_i)| {
          *a_i += s * *v_i;
        });
      };

      let kzg_compute_batch_polynomial = |f: &[Vec<E::Scalar>], q: E::Scalar| -> Vec<E::Scalar> {
        let k = f.len(); // Number of polynomials we're batching

        let q_powers = Self::batch_challenge_powers(q, k);

        // Compute B(x) = f[0] + q*f[1] + q^2 * f[2] + ... q^(k-1) * f[k-1]
        let mut B = f[0].clone();
        for i in 1..k {
          scalar_vector_muladd(&mut B, &f[i], q_powers[i]); // B += q_powers[i] * f[i]
        }

        B
      };
      ///////// END kzg_open_batch closure helpers

      let k = f.len();
      // Note: u.len() is always 3.

      // The verifier needs f_i(u_j), so we compute them here
      // (V will compute B(u_j) itself)
      let mut v = vec![[E::Scalar::ZERO; 3]; k];
      v.par_iter_mut().zip_eq(f).for_each(|(v_j, f)| {
        // for each polynomial f, evaluate it at each of the three points in u
        v_j.par_iter_mut().enumerate().for_each(|(i, v_ij)| {
          // for each point u
          *v_ij = poly_eval(f, u[i]);
        });
      });

      // Batch all polynomials into B(x) using challenge q, then open B once per point.
      let q = Self::get_batch_challenge(&v, transcript);
      let B = kzg_compute_batch_polynomial(f, q);

      // Now open B at u0, ..., u_{t-1}
      let w = u
        .into_par_iter()
        .map(|ui| kzg_open(&B, *ui))
        .collect::<Vec<G1Affine<E>>>();

      // The prover computes the challenge to keep the transcript in the same
      // state as that of the verifier
      let _d_0 = Self::verifier_second_challenge(&w, transcript);

      (w, v)
    };

    ///// END helper closures //////////

    let ell = x.len();
    let n = hat_P.len();
    assert_eq!(n, 1 << ell); // Below we assume that n is a power of two

    // Phase 1  -- create commitments com_1, ..., com_\ell
    // We do not compute final Pi (and its commitment) as it is constant and equals to 'eval'
    // also known to verifier, so can be derived on its side as well
    let mut polys: Vec<Vec<E::Scalar>> = Vec::new();
    polys.push(hat_P.to_vec());
    for i in 0..ell - 1 {
      // Fold polys[i] in half: fix the top variable of the multilinear polynomial
      // to x[ell - i - 1], producing a polynomial over one fewer variable.
      let Pi_len = polys[i].len() / 2;
      let mut Pi = vec![E::Scalar::ZERO; Pi_len];

      #[allow(clippy::needless_range_loop)]
      Pi.par_iter_mut().enumerate().for_each(|(j, Pi_j)| {
        *Pi_j = x[ell - i - 1] * (polys[i][2 * j + 1] - polys[i][2 * j]) + polys[i][2 * j];
      });

      polys.push(Pi);
    }

    // We do not need to commit to the first polynomial as it is already committed.
    // Compute commitments in parallel
    let r = vec![E::Scalar::ZERO; ell - 1];
    let com: Vec<G1Affine<E>> = E::CE::batch_commit(ck, &polys[1..], r.as_slice())
      .par_iter()
      .map(|i| i.comm.affine())
      .collect();

    // Phase 2
    // We do not need to add x to the transcript, because in our context x was obtained from the transcript.
    // We also do not need to absorb `C` and `eval` as they are already absorbed by the transcript by the caller
    let r = Self::compute_challenge(&com, transcript);
    let u = [r, -r, r * r];

    // Phase 3 -- create response
    let (w, v) = kzg_open_batch(&polys, &u, transcript);

    Ok(EvaluationArgument {
      com,
      w: w.try_into().expect("w should have length 3"),
      v,
    })
  }
1058
  /// Verifies a purported evaluation `y` of the multilinear polynomial
  /// committed to by `C` at the point `x`, against the HyperKZG proof `pi`.
  ///
  /// The transcript must be in the same state as the prover's was when
  /// `prove` was called, so the challenges `r`, `q`, and `d_0` re-derived
  /// here match the ones the proof was built with. Returns
  /// `NovaError::ProofVerifyError` on any length, consistency, or pairing
  /// failure.
  fn verify(
    vk: &Self::VerifierKey,
    transcript: &mut <E as Engine>::TE,
    C: &Commitment<E>,
    x: &[E::Scalar],
    y: &E::Scalar,
    pi: &Self::EvaluationArgument,
  ) -> Result<(), NovaError> {
    let ell = x.len();

    // we do not need to add x to the transcript, because in our context x was
    // obtained from the transcript
    let r = Self::compute_challenge(&pi.com, transcript);

    // The three univariate evaluation points used throughout: (r, -r, r^2).
    let u = [r, -r, r * r];

    // Setup vectors (Y, ypos, yneg) from pi.v.
    // There is one row of three evaluations per folding round (ell of them)
    // and one commitment per intermediate folded polynomial (ell - 1 of them).
    if pi.v.len() != ell || pi.com.len() != ell - 1 {
      return Err(NovaError::ProofVerifyError {
        reason: "Invalid lengths of pi.v".to_string(),
      });
    }

    // Check consistency of (Y, ypos, yneg)
    for i in 0..ell {
      let ypos = pi.v[i][0];
      let yneg = pi.v[i][1];
      // Y is the next round's evaluation at r^2; for the final round it is the
      // claimed evaluation `y` itself (the last fold is constant, so the
      // prover never sends it — see the prover-side comment in `prove`).
      let Y = pi.v.get(i + 1).map_or(*y, |v| v[2]);
      // Folding consistency check, multiplied through by 2r to avoid field
      // divisions:
      //   Y = (1 - x_i) * (ypos + yneg) / 2 + x_i * (ypos - yneg) / (2r)
      if r.double() * Y
        != r * (E::Scalar::ONE - x[ell - i - 1]) * (ypos + yneg) + x[ell - i - 1] * (ypos - yneg)
      {
        return Err(NovaError::ProofVerifyError {
          reason: "Inconsistent (Y, ypos, yneg)".to_string(),
        });
      }
      // Note that we don't make any checks about Y[0] here, but our batching
      // check below requires it
    }

    // Check commitments to (Y, ypos, yneg) are valid

    // vk is hashed in transcript already, so we do not add it here

    let q = Self::get_batch_challenge(&pi.v, transcript);

    let d_0 = Self::verifier_second_challenge(&pi.w, transcript);
    let d_1 = d_0.square();

    // We write a special case for t=3, since this what is required for
    // hyperkzg. Following the paper directly, we must compute:
    // let L0 = C_B - vk.G * B_u[0] + W[0] * u[0];
    // let L1 = C_B - vk.G * B_u[1] + W[1] * u[1];
    // let L2 = C_B - vk.G * B_u[2] + W[2] * u[2];
    // let R0 = -W[0];
    // let R1 = -W[1];
    // let R2 = -W[2];
    // let L = L0 + L1*d_0 + L2*d_1;
    // let R = R0 + R1*d_0 + R2*d_1;
    //
    // We group terms to reduce the number of scalar mults (to seven):
    // In Rust, we could use MSMs for these, and speed up verification.
    //
    // Note, that while computing L, the intermediate computation of C_B together with computing
    // L0, L1, L2 can be replaced by single MSM of C with the powers of q multiplied by (1 + d_0 + d_1)
    // with additionally concatenated inputs for scalars/bases.

    let q_power_multiplier = E::Scalar::ONE + d_0 + d_1;

    // q_powers_multiplied[i] = (1 + d_0 + d_1) * q^i, for i in 0..ell.
    let q_powers_multiplied: Vec<E::Scalar> =
      iter::successors(Some(q_power_multiplier), |qi| Some(*qi * q))
        .take(ell)
        .collect();

    // Compute the batched openings
    // compute B(u_i) = v[0][i] + q*v[1][i] + ... + q^(ell-1) * v[ell-1][i]
    // via Horner's rule over the rows of pi.v (one row per power of q).
    let B_u = (0..3)
      .into_par_iter()
      .map(|i| {
        pi.v
          .iter()
          .rev()
          .fold(E::Scalar::ZERO, |acc, v_j| acc * q + v_j[i])
      })
      .collect::<Vec<E::Scalar>>();

    // Single MSM computing
    //   L = C_B + W[0]*u[0] + W[1]*u[1]*d_0 + W[2]*u[2]*d_1
    //         - vk.G * (B_u[0] + d_0*B_u[1] + d_1*B_u[2]),
    // where C_B is folded in via the (1 + d_0 + d_1)-scaled powers of q
    // applied to [C, com[0], ..., com[ell-2]].
    let L = E::GE::vartime_multiscalar_mul(
      &[
        &q_powers_multiplied[..],
        &[
          u[0],
          (u[1] * d_0),
          (u[2] * d_1),
          -(B_u[0] + d_0 * B_u[1] + d_1 * B_u[2]),
        ],
      ]
      .concat(),
      &[
        &[C.comm.affine()][..],
        &pi.com,
        &pi.w,
        slice::from_ref(&vk.G),
      ]
      .concat(),
    );

    // R = W[0] + d_0*W[1] + d_1*W[2] (the batched witness)
    let R0 = E::GE::group(&pi.w[0]);
    let R1 = E::GE::group(&pi.w[1]);
    let R2 = E::GE::group(&pi.w[2]);
    let R = R0 + R1 * d_0 + R2 * d_1;

    // Check that e(L, vk.H) == e(R, vk.tau_H)
    if (E::GE::pairing(&L, &DlogGroup::group(&vk.H)))
      != (E::GE::pairing(&R, &DlogGroup::group(&vk.tau_H)))
    {
      return Err(NovaError::ProofVerifyError {
        reason: "Pairing check failed".to_string(),
      });
    }

    Ok(())
  }
1181}
1182
#[cfg(test)]
mod tests {
  use super::*;
  #[cfg(feature = "io")]
  use crate::provider::hyperkzg;
  use crate::{
    provider::{keccak::Keccak256Transcript, Bn256EngineKZG},
    spartan::polys::multilinear::MultilinearPolynomial,
  };
  use rand::SeedableRng;
  #[cfg(feature = "io")]
  use std::{
    fs::OpenOptions,
    io::{BufReader, BufWriter},
  };

  type E = Bn256EngineKZG;
  type Fr = <E as Engine>::Scalar;

  /// Round-trips prove/verify on poly(X1, X2) = 1 + X1 + X2 + X1*X2 at several
  /// points: genuine evaluations must verify, wrong claims must be rejected.
  #[test]
  fn test_hyperkzg_eval() {
    // Test with poly(X1, X2) = 1 + X1 + X2 + X1*X2
    let n = 4;
    let ck: CommitmentKey<E> = CommitmentEngine::setup(b"test", n).unwrap();
    let (pk, vk): (ProverKey<E>, VerifierKey<E>) = EvaluationEngine::setup(&ck).unwrap();

    // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)]
    let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)];

    let C = CommitmentEngine::commit(&ck, &poly, &<E as Engine>::Scalar::ZERO);

    // Proves and then verifies a single (point, eval) claim, using fresh
    // transcripts with the same label on both sides.
    let test_inner = |point: Vec<Fr>, eval: Fr| -> Result<(), NovaError> {
      let mut tr = Keccak256Transcript::new(b"TestEval");
      let proof = EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).unwrap();
      let mut tr = Keccak256Transcript::new(b"TestEval");
      EvaluationEngine::verify(&vk, &mut tr, &C, &point, &eval, &proof)
    };

    // Correct (point, eval) pairs: the prover does not recompute the
    // evaluation itself, but since these claims are genuine, the proofs
    // produced should verify.
    let point = vec![Fr::from(0), Fr::from(0)];
    let eval = Fr::ONE;
    assert!(test_inner(point, eval).is_ok());

    let point = vec![Fr::from(0), Fr::from(1)];
    let eval = Fr::from(2);
    assert!(test_inner(point, eval).is_ok());

    let point = vec![Fr::from(1), Fr::from(1)];
    let eval = Fr::from(4);
    assert!(test_inner(point, eval).is_ok());

    // Points outside the Boolean hypercube exercise the full multilinear
    // extension of the polynomial.
    let point = vec![Fr::from(0), Fr::from(2)];
    let eval = Fr::from(3);
    assert!(test_inner(point, eval).is_ok());

    let point = vec![Fr::from(2), Fr::from(2)];
    let eval = Fr::from(9);
    assert!(test_inner(point, eval).is_ok());

    // Try a couple incorrect evaluations and expect failure
    let point = vec![Fr::from(2), Fr::from(2)];
    let eval = Fr::from(50);
    assert!(test_inner(point, eval).is_err());

    let point = vec![Fr::from(0), Fr::from(2)];
    let eval = Fr::from(4);
    assert!(test_inner(point, eval).is_err());
  }

  /// End-to-end check on a small instance: proof verifies, prover/verifier
  /// transcripts end in the same state, the serialized proof has the expected
  /// size, and a mutated proof is rejected.
  #[test]
  #[cfg(not(feature = "evm"))]
  fn test_hyperkzg_small() {
    let n = 4;

    // poly = [1, 2, 1, 4]
    let poly = vec![Fr::ONE, Fr::from(2), Fr::from(1), Fr::from(4)];

    // point = [4,3]
    let point = vec![Fr::from(4), Fr::from(3)];

    // eval = 28
    let eval = Fr::from(28);

    let ck: CommitmentKey<E> = CommitmentEngine::setup(b"test", n).unwrap();
    let (pk, vk) = EvaluationEngine::setup(&ck).unwrap();

    // make a commitment
    let C = CommitmentEngine::commit(&ck, &poly, &<E as Engine>::Scalar::ZERO);

    // prove an evaluation
    let mut prover_transcript = Keccak256Transcript::new(b"TestEval");
    let proof =
      EvaluationEngine::<E>::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval)
        .unwrap();
    let post_c_p = prover_transcript.squeeze(b"c").unwrap();

    // verify the evaluation
    let mut verifier_transcript = Keccak256Transcript::new(b"TestEval");
    assert!(
      EvaluationEngine::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof).is_ok()
    );
    let post_c_v = verifier_transcript.squeeze(b"c").unwrap();

    // check if the prover transcript and verifier transcript are kept in the same state
    assert_eq!(post_c_p, post_c_v);

    let config = bincode::config::legacy()
      .with_big_endian()
      .with_fixed_int_encoding();
    let proof_bytes =
      bincode::serde::encode_to_vec(&proof, config).expect("Failed to serialize proof");

    assert_eq!(
      proof_bytes.len(),
      // NOTE(review): this test is gated on #[cfg(not(feature = "evm"))],
      // so the 464 branch below can never be taken here — confirm whether
      // the cfg! is intentional future-proofing or leftover.
      if cfg!(feature = "evm") { 464 } else { 336 }
    );

    // Change the proof and expect verification to fail
    let mut bad_proof = proof.clone();
    let v1 = bad_proof.v[1];
    bad_proof.v[0].clone_from(&v1);
    let mut verifier_transcript2 = Keccak256Transcript::new(b"TestEval");
    assert!(EvaluationEngine::verify(
      &vk,
      &mut verifier_transcript2,
      &C,
      &point,
      &eval,
      &bad_proof
    )
    .is_err());
  }

  /// Prove/verify round trips on seeded-random instances of increasing size
  /// (ell = 4, 5, 6 variables), plus rejection of a tampered proof.
  #[test]
  fn test_hyperkzg_large() {
    // test the hyperkzg prover and verifier with random instances (derived from a seed)
    for ell in [4, 5, 6] {
      let mut rng = rand::rngs::StdRng::seed_from_u64(ell as u64);

      let n = 1 << ell; // n = 2^ell

      let poly = (0..n).map(|_| Fr::random(&mut rng)).collect::<Vec<_>>();
      let point = (0..ell).map(|_| Fr::random(&mut rng)).collect::<Vec<_>>();
      let eval = MultilinearPolynomial::evaluate_with(&poly, &point);

      let ck: CommitmentKey<E> = CommitmentEngine::setup(b"test", n).unwrap();
      let (pk, vk) = EvaluationEngine::setup(&ck).unwrap();

      // make a commitment
      let C = CommitmentEngine::commit(&ck, &poly, &<E as Engine>::Scalar::ZERO);

      // prove an evaluation
      let mut prover_transcript = Keccak256Transcript::new(b"TestEval");
      let proof: EvaluationArgument<E> =
        EvaluationEngine::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval)
          .unwrap();

      // verify the evaluation
      let mut verifier_tr = Keccak256Transcript::new(b"TestEval");
      assert!(EvaluationEngine::verify(&vk, &mut verifier_tr, &C, &point, &eval, &proof).is_ok());

      // Change the proof and expect verification to fail
      let mut bad_proof = proof.clone();
      let v1 = bad_proof.v[1];
      bad_proof.v[0].clone_from(&v1);
      let mut verifier_tr2 = Keccak256Transcript::new(b"TestEval");
      assert!(
        EvaluationEngine::verify(&vk, &mut verifier_tr2, &C, &point, &eval, &bad_proof).is_err()
      );
    }
  }

  /// Same as `test_hyperkzg_large`, but loads the commitment key from an
  /// externally supplied ptau file instead of generating it (hence #[ignore]).
  #[cfg(feature = "io")]
  #[ignore = "only available with external ptau files"]
  #[test]
  fn test_hyperkzg_large_from_file() {
    // test the hyperkzg prover and verifier with random instances (derived from a seed)
    for ell in [4, 5, 6] {
      let mut rng = rand::rngs::StdRng::seed_from_u64(ell as u64);

      let n = 1 << ell; // n = 2^ell

      let poly = (0..n).map(|_| Fr::random(&mut rng)).collect::<Vec<_>>();
      let point = (0..ell).map(|_| Fr::random(&mut rng)).collect::<Vec<_>>();
      let eval = MultilinearPolynomial::evaluate_with(&poly, &point);

      let mut reader = BufReader::new(std::fs::File::open("/tmp/ppot_0080_13.ptau").unwrap());

      let ck: CommitmentKey<E> = CommitmentEngine::load_setup(&mut reader, b"test", n).unwrap();
      let (pk, vk) = EvaluationEngine::setup(&ck).unwrap();

      // make a commitment
      let C = CommitmentEngine::commit(&ck, &poly, &<E as Engine>::Scalar::ZERO);

      // prove an evaluation
      let mut prover_transcript = Keccak256Transcript::new(b"TestEval");
      let proof: EvaluationArgument<E> =
        EvaluationEngine::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval)
          .unwrap();

      // verify the evaluation
      let mut verifier_tr = Keccak256Transcript::new(b"TestEval");
      assert!(EvaluationEngine::verify(&vk, &mut verifier_tr, &C, &point, &eval, &proof).is_ok());

      // Change the proof and expect verification to fail
      let mut bad_proof = proof.clone();
      let v1 = bad_proof.v[1];
      bad_proof.v[0].clone_from(&v1);
      let mut verifier_tr2 = Keccak256Transcript::new(b"TestEval");
      assert!(
        EvaluationEngine::verify(&vk, &mut verifier_tr2, &C, &point, &eval, &bad_proof).is_err()
      );
    }
  }

  /// Checks that the direct and fixed-base-exponentiation key-generation paths
  /// produce identical commitment keys for the same powers of tau.
  #[test]
  fn test_key_gen() {
    let n = 100;
    let tau = Fr::random(OsRng);
    let powers_of_tau = CommitmentKey::<E>::compute_powers_serial(tau, n);
    let label = b"test";
    let res1 = CommitmentKey::<E>::setup_from_tau_direct(label, &powers_of_tau, tau);
    let res2 = CommitmentKey::<E>::setup_from_tau_fixed_base_exp(label, &powers_of_tau);

    assert_eq!(res1.ck.len(), res2.ck.len());
    assert_eq!(res1.h, res2.h);
    assert_eq!(res1.tau_H, res2.tau_H);
    for i in 0..res1.ck.len() {
      assert_eq!(res1.ck[i], res2.ck[i]);
    }
  }

  /// Saves a commitment key to a ptau file and reloads it, checking that every
  /// component survives the round trip unchanged.
  #[cfg(feature = "io")]
  #[test]
  fn test_save_load_ck() {
    const BUFFER_SIZE: usize = 64 * 1024;
    const LABEL: &[u8] = b"test";

    let n = 4;
    let filename = "/tmp/kzg_test.ptau";

    let ck: CommitmentKey<E> = CommitmentEngine::setup(LABEL, n).unwrap();

    let file = OpenOptions::new()
      .write(true)
      .create(true)
      .truncate(true)
      .open(filename)
      .unwrap();
    let mut writer = BufWriter::with_capacity(BUFFER_SIZE, file);

    CommitmentEngine::save_setup(&ck, &mut writer).unwrap();

    let file = OpenOptions::new().read(true).open(filename).unwrap();

    let mut reader = BufReader::new(file);

    let read_ck =
      hyperkzg::CommitmentEngine::<E>::load_setup(&mut reader, LABEL, ck.ck.len()).unwrap();

    assert_eq!(ck.ck.len(), read_ck.ck.len());
    assert_eq!(ck.h, read_ck.h);
    assert_eq!(ck.tau_H, read_ck.tau_H);
    for i in 0..ck.ck.len() {
      assert_eq!(ck.ck[i], read_ck.ck[i]);
    }
  }

  /// Sanity-checks a key loaded from an external ptau file via pairing
  /// bilinearity: e(x*G1, tau*H) must equal e(G1, tau*H)^x for random x.
  #[cfg(feature = "io")]
  #[ignore = "only available with external ptau files"]
  #[test]
  fn test_load_ptau() {
    let filename = "/tmp/ppot_0080_13.ptau";
    let file = OpenOptions::new().read(true).open(filename).unwrap();

    let mut reader = BufReader::new(file);

    let ck = hyperkzg::CommitmentEngine::<E>::load_setup(&mut reader, b"test", 1).unwrap();

    let mut rng = rand::thread_rng();

    let gen_g1 = ck.ck[0];
    let t_g2 = ck.tau_H;

    for _ in 0..1000 {
      let x = Fr::from(<rand::rngs::ThreadRng as rand::Rng>::gen::<u64>(&mut rng));
      // x^4 keeps the scalar nontrivial while exercising field arithmetic.
      let x = x * x * x * x;

      let left = halo2curves::bn256::G1::pairing(&(gen_g1 * x), &t_g2.into());
      let right = halo2curves::bn256::G1::pairing(&gen_g1.into(), &t_g2.into()) * x;

      assert_eq!(left, right);
    }
  }
}
1479
#[cfg(test)]
mod evm_tests {
  use super::*;
  use crate::provider::Bn256EngineKZG;

  /// Checks that a default commitment serializes (via bincode's legacy
  /// config) to the expected byte length: 64 bytes with the `evm` feature
  /// enabled, 32 bytes otherwise.
  #[test]
  fn test_commitment_evm_serialization() {
    type E = Bn256EngineKZG;

    let commitment = Commitment::<E>::default();
    let encoded = bincode::serde::encode_to_vec(commitment, bincode::config::legacy()).unwrap();

    println!(
      "Commitment serialized length in nova-snark: {} bytes",
      encoded.len()
    );
    // Print at most the first 64 bytes as hex for quick inspection.
    let preview_len = encoded.len().min(64);
    println!("Commitment hex: {}", hex::encode(&encoded[..preview_len]));

    // Expect 64 bytes for EVM feature, else 32 bytes
    let expected_len = if cfg!(feature = "evm") { 64 } else { 32 };
    assert_eq!(
      encoded.len(),
      expected_len,
      "Commitment serialization length mismatch"
    );
  }
}