pub mod direct;
pub mod ppsnark;
pub mod snark;
#[macro_use]
mod macros;
pub mod math;
pub mod polys;
pub mod sumcheck;
pub use sumcheck::SumcheckEngine;
use crate::{
errors::NovaError,
r1cs::{R1CSShape, SparseMatrix},
traits::{Engine, TranscriptEngineTrait},
Commitment,
};
use ff::{Field, PrimeField};
use itertools::Itertools as _;
use rayon::{iter::IntoParallelRefIterator, prelude::*};
use std::cmp::max;
/// Returns the first `n` powers of `s`: `[1, s, s^2, ..., s^(n-1)]`.
///
/// # Panics
/// Panics if `n == 0`.
pub fn powers<E: Engine>(s: &E::Scalar, n: usize) -> Vec<E::Scalar> {
  assert!(n >= 1);
  // Each element is the previous one multiplied by `s`, starting from 1.
  std::iter::successors(Some(E::Scalar::ONE), |power| Some(*power * s))
    .take(n)
    .collect()
}
/// Inverts every element of `v` in a single batch using Montgomery's trick,
/// parallelized across rayon worker threads for large inputs.
///
/// Falls back to [`batch_invert_serial`] below a size threshold, where the
/// overhead of parallelism outweighs its benefit.
///
/// # Errors
/// Returns `NovaError::InternalError` if any element of `v` is zero (detected
/// via the product of all elements being zero).
pub fn batch_invert<Scalar: PrimeField>(v: &[Scalar]) -> Result<Vec<Scalar>, NovaError> {
  // Inputs strictly smaller than this are inverted serially.
  const MAX_SIZE_FOR_SERIAL: usize = 4096;
  // Lower bound on the per-chunk size, to amortize scheduling overhead.
  const MIN_CHUNK_SIZE: usize = 128;

  if v.len() < MAX_SIZE_FOR_SERIAL {
    return batch_invert_serial(v);
  }

  // Computes exclusive prefix products of `beta`:
  //   prod[i] = beta[0] * ... * beta[i-1]  (prod[0] = 1),
  // and also returns the product of all elements.
  let compute_prod = |beta: &[Scalar]| -> (Vec<Scalar>, Scalar) {
    let mut prod = vec![Scalar::ZERO; beta.len()];
    let mut acc = Scalar::ONE;
    prod.iter_mut().zip(beta).for_each(|(p, e)| {
      let nxt_acc = acc * *e;
      *p = acc;
      acc = nxt_acc;
    });
    (prod, acc)
  };

  // Given the exclusive prefix products `prod` of `beta` and the inverse of
  // the total product (`init_inv`), overwrites each beta[i] with beta[i]^{-1}
  // via a single backward sweep (one field inversion total).
  let compute_inv = |beta: &mut [Scalar], prod: &[Scalar], init_inv: Scalar| {
    let mut acc = init_inv;
    for i in (0..beta.len()).rev() {
      // acc currently equals (beta[0] * ... * beta[i])^{-1}.
      let nxt_acc = acc * beta[i];
      beta[i] = acc * prod[i];
      acc = nxt_acc;
    }
  };

  let mut v = v.to_vec();

  let num_chunks = rayon::current_num_threads();
  let chunk_size = max(MIN_CHUNK_SIZE, v.len() / num_chunks);

  // First level: per-chunk prefix products (prod1) and chunk totals (beta2).
  // Second level: prefix products of the chunk totals (prod2) and the grand
  // total (root). Both levels use the same compute_prod helper.
  let (prod1, mut beta2, prod2, root) = {
    let chunks = v
      .par_chunks(chunk_size)
      .map(compute_prod)
      .collect::<Vec<_>>();
    let (prod1, beta2): (Vec<Vec<_>>, Vec<_>) = chunks.into_iter().unzip();
    let (prod2, root) = compute_prod(&beta2);
    (prod1, beta2, prod2, root)
  };

  // A zero anywhere in `v` makes the grand product zero.
  if root == Scalar::ZERO {
    return Err(NovaError::InternalError);
  }

  // Safe to unwrap: `root` was just checked to be non-zero.
  let root_inv = root.invert().unwrap();
  // Turn each chunk total into its inverse; these seed the per-chunk sweeps.
  compute_inv(&mut beta2, &prod2, root_inv);
  // Second pass over the chunks (in parallel): invert every element in place.
  // The zips are aligned because prod1/beta2 were derived from the exact same
  // par_chunks partition of `v`.
  v.par_chunks_mut(chunk_size)
    .zip(prod1)
    .zip(beta2)
    .for_each(|((v, p), b)| {
      compute_inv(v, &p, b);
    });

  Ok(v)
}
/// Inverts every element of `v` serially using Montgomery's trick, performing
/// a single field inversion for the whole batch.
///
/// # Errors
/// Returns `NovaError::InternalError` if any element of `v` is zero (detected
/// via the product of all elements being zero).
pub fn batch_invert_serial<Scalar: PrimeField>(v: &[Scalar]) -> Result<Vec<Scalar>, NovaError> {
  // Forward pass: prefix[i] = v[0] * ... * v[i-1] (prefix[0] = 1), and
  // `running` accumulates the product of all elements.
  let mut prefix = Vec::with_capacity(v.len());
  let mut running = Scalar::ONE;
  for x in v {
    prefix.push(running);
    running *= *x;
  }

  // A zero anywhere in `v` makes the total product zero.
  if running == Scalar::ZERO {
    return Err(NovaError::InternalError);
  }

  // Safe to unwrap: the total product was just checked to be non-zero.
  let mut suffix_inv = running.invert().unwrap();

  // Backward pass: suffix_inv equals (v[0] * ... * v[i])^{-1} when visiting
  // index i, so prefix[i] * suffix_inv == v[i]^{-1}.
  let mut inv = vec![Scalar::ZERO; v.len()];
  for (i, x) in v.iter().enumerate().rev() {
    inv[i] = prefix[i] * suffix_inv;
    suffix_inv *= *x;
  }

  Ok(inv)
}
/// A witness for a polynomial evaluation claim: the multilinear polynomial
/// itself, represented by its evaluation table (used downstream via
/// `MultilinearPolynomial::new`; length is asserted to be a power of two in
/// `batch_eval_reduce`).
#[derive(Clone, Debug)]
pub struct PolyEvalWitness<E: Engine> {
  // Evaluation table of the polynomial (2^{num_vars} entries).
  p: Vec<E::Scalar>,
}
impl<E: Engine> PolyEvalWitness<E> {
  /// Returns the evaluation table of the underlying polynomial.
  pub fn p(&self) -> &[E::Scalar] {
    &self.p
  }

  /// Given witness polynomials `W` of possibly *different* lengths and a
  /// batching challenge `s`, computes the random linear combination
  /// `P = sum_i s^i * W[i].p`, implicitly zero-padding shorter polynomials up
  /// to the maximum length.
  fn batch_diff_size(W: Vec<PolyEvalWitness<E>>, s: E::Scalar) -> PolyEvalWitness<E> {
    let powers = powers::<E>(&s, W.len());

    let size_max = W.iter().map(|w| w.p.len()).max().unwrap();
    // One output chunk per (power-of-two rounded) rayon thread.
    // NOTE(review): the chunked path assumes `size_max` is a multiple of
    // `num_chunks` (true when all polynomial sizes are powers of two, as
    // asserted in `batch_eval_reduce`); otherwise a trailing remainder would
    // be dropped.
    let num_chunks = rayon::current_num_threads().next_power_of_two();
    let chunk_size = size_max / num_chunks;

    let p = if chunk_size > 0 {
      (0..num_chunks)
        .into_par_iter()
        .flat_map_iter(|chunk_index| {
          let mut chunk = vec![E::Scalar::ZERO; chunk_size];

          for (coeff, poly) in powers.iter().zip(W.iter()) {
            // A polynomial shorter than `chunk_index * chunk_size` contributes
            // nothing to this chunk. Using `get(..)` here avoids the
            // out-of-bounds panic that `poly.p[chunk_index * chunk_size..]`
            // hits whenever the slice start exceeds the polynomial's length.
            let tail = poly.p.get(chunk_index * chunk_size..).unwrap_or(&[]);
            for (rlc, poly_eval) in chunk.iter_mut().zip(tail.iter()) {
              // s^0 == 1: skip the redundant multiplication.
              if *coeff == E::Scalar::ONE {
                *rlc += *poly_eval;
              } else {
                *rlc += *coeff * poly_eval;
              };
            }
          }
          chunk
        })
        .collect::<Vec<_>>()
    } else {
      // Tiny inputs: scale each polynomial by its power of `s`, then sum
      // element-wise, always accumulating into the longer vector so shorter
      // ones are implicitly zero-padded.
      W.into_par_iter()
        .zip_eq(powers.par_iter())
        .map(|(mut w, s)| {
          if *s != E::Scalar::ONE {
            w.p.par_iter_mut().for_each(|e| *e *= s);
          }
          w.p
        })
        .reduce(
          || vec![E::Scalar::ZERO; size_max],
          |left, right| {
            let (mut big, small) = if left.len() > right.len() {
              (left, right)
            } else {
              (right, left)
            };
            big
              .par_iter_mut()
              .zip(small.par_iter())
              .for_each(|(b, s)| *b += s);
            big
          },
        )
    };

    PolyEvalWitness { p }
  }

  /// Given polynomials of *equal* length and a challenge `s`, computes their
  /// random linear combination `sum_i s^i * p_vec[i]`.
  ///
  /// # Panics
  /// Panics if the polynomials do not all have the same length.
  fn batch(p_vec: &[&Vec<E::Scalar>], s: &E::Scalar) -> PolyEvalWitness<E> {
    p_vec
      .iter()
      .for_each(|p| assert_eq!(p.len(), p_vec[0].len()));

    let powers_of_s = powers::<E>(s, p_vec.len());

    let num_chunks = rayon::current_num_threads().next_power_of_two();
    let chunk_size = p_vec[0].len() / num_chunks;

    let p = if chunk_size > 0 {
      // Parallelize over output chunks; each accumulates all weighted inputs.
      // Slicing is in-bounds here because all lengths are equal and
      // chunk_index * chunk_size < p_vec[0].len().
      (0..num_chunks)
        .into_par_iter()
        .flat_map_iter(|chunk_index| {
          let mut chunk = vec![E::Scalar::ZERO; chunk_size];
          for (coeff, poly) in powers_of_s.iter().zip(p_vec.iter()) {
            for (rlc, poly_eval) in chunk
              .iter_mut()
              .zip(poly[chunk_index * chunk_size..].iter())
            {
              // s^0 == 1: skip the redundant multiplication.
              if *coeff == E::Scalar::ONE {
                *rlc += *poly_eval;
              } else {
                *rlc += *coeff * poly_eval;
              };
            }
          }
          chunk
        })
        .collect::<Vec<_>>()
    } else {
      // Tiny inputs: weight each polynomial, then sum element-wise.
      zip_with!(par_iter, (p_vec, powers_of_s), |v, weight| {
        v.iter().map(|&x| x * *weight).collect::<Vec<E::Scalar>>()
      })
      .reduce(
        || vec![E::Scalar::ZERO; p_vec[0].len()],
        |acc, v| {
          zip_with!((acc.into_iter(), v), |x, y| x + y).collect()
        },
      )
    };

    PolyEvalWitness { p }
  }
}
/// An instance of a polynomial evaluation claim: a committed multilinear
/// polynomial `c` is claimed to evaluate to `e` at the point `x`.
#[derive(Clone, Debug)]
pub struct PolyEvalInstance<E: Engine> {
  // Commitment to the polynomial.
  c: Commitment<E>,
  // Evaluation point (one scalar per variable).
  x: Vec<E::Scalar>,
  // Claimed evaluation at `x`.
  e: E::Scalar,
}
impl<E: Engine> PolyEvalInstance<E> {
  /// Returns the commitment to the polynomial.
  pub fn c(&self) -> &Commitment<E> {
    &self.c
  }

  /// Returns the evaluation point.
  pub fn x(&self) -> &[E::Scalar] {
    &self.x
  }

  /// Returns the claimed evaluation.
  pub fn e(&self) -> E::Scalar {
    self.e
  }

  /// Batches evaluation claims over polynomials with differing numbers of
  /// variables into a single claim at the common point `x`, using challenge
  /// `s` for the random linear combination.
  ///
  /// Each claim over `num_rounds < x.len()` variables is first scaled by
  /// `prod_{r in r_lo} (1 - r)`, where `r_lo` is the unused prefix of `x` —
  /// this keeps the combined evaluation consistent with the zero-padded
  /// witness produced by `PolyEvalWitness::batch_diff_size`.
  fn batch_diff_size(
    c_vec: &[Commitment<E>],
    e_vec: &[E::Scalar],
    num_vars: &[usize],
    x: Vec<E::Scalar>,
    s: E::Scalar,
  ) -> PolyEvalInstance<E> {
    let num_instances = num_vars.len();
    assert_eq!(c_vec.len(), num_instances);
    assert_eq!(e_vec.len(), num_instances);

    let num_vars_max = x.len();
    let powers: Vec<E::Scalar> = powers::<E>(&s, num_instances);
    // Scale each claimed evaluation by the correction factor for its unused
    // (high-order) variables.
    let evals_scaled = zip_with!(iter, (e_vec, num_vars), |eval, num_rounds| {
      let (r_lo, _r_hi) = x.split_at(num_vars_max - num_rounds);
      let lagrange_eval = r_lo
        .iter()
        .map(|r| E::Scalar::ONE - r)
        .product::<E::Scalar>();
      lagrange_eval * eval
    })
    .collect::<Vec<_>>();

    // Random linear combination of the commitments with powers of `s` ...
    let comm_joint = zip_with!(iter, (c_vec, powers), |c, g_i| *c * *g_i)
      .fold(Commitment::<E>::default(), |acc, item| acc + item);
    // ... and the matching combination of the scaled evaluations.
    let eval_joint = zip_with!((evals_scaled.into_iter(), powers.iter()), |e, g_i| e * g_i).sum();

    PolyEvalInstance {
      c: comm_joint,
      x,
      e: eval_joint,
    }
  }

  /// Batches evaluation claims at the *same* point `x` (implying equal sizes)
  /// into a single claim, using challenge `s` for the random linear
  /// combination.
  fn batch(
    c_vec: &[Commitment<E>],
    x: &[E::Scalar],
    e_vec: &[E::Scalar],
    s: &E::Scalar,
  ) -> PolyEvalInstance<E> {
    let num_instances = c_vec.len();
    assert_eq!(e_vec.len(), num_instances);

    let powers_of_s = powers::<E>(s, num_instances);
    // RLC of the claimed evaluations ...
    let e = zip_with!(par_iter, (e_vec, powers_of_s), |e, p| *e * p).sum();
    // ... and of the corresponding commitments, with the same weights.
    let c = zip_with!(par_iter, (c_vec, powers_of_s), |c, p| *c * *p)
      .reduce(Commitment::<E>::default, |acc, item| acc + item);

    PolyEvalInstance {
      c,
      x: x.to_vec(),
      e,
    }
  }
}
/// Reduces a batch of polynomial evaluation claims (over polynomials of
/// possibly different sizes) to a single joint claim via a batched sumcheck.
///
/// Returns the joint instance, the joint witness, the batching challenge `c`,
/// the sumcheck proof, and the individual evaluations at the sumcheck point
/// (`claims_batch_left`) — the latter two are needed by `batch_eval_verify`.
///
/// # Errors
/// Propagates transcript and sumcheck errors as `NovaError`.
///
/// # Panics
/// Panics if the instance/witness counts disagree, or if a witness length is
/// not `2^{num_vars}` for its instance.
pub fn batch_eval_reduce<E: Engine>(
  u_vec: Vec<PolyEvalInstance<E>>,
  w_vec: Vec<PolyEvalWitness<E>>,
  transcript: &mut E::TE,
) -> Result<
  (
    PolyEvalInstance<E>,
    PolyEvalWitness<E>,
    E::Scalar,
    sumcheck::SumcheckProof<E>,
    Vec<E::Scalar>,
  ),
  NovaError,
> {
  use polys::multilinear::MultilinearPolynomial;

  let num_claims = u_vec.len();
  assert_eq!(w_vec.len(), num_claims);

  // Each claim may use a different number of sumcheck rounds (variables).
  let num_rounds = u_vec.iter().map(|u| u.x.len()).collect::<Vec<_>>();
  // Sanity check: every witness table has exactly 2^{num_vars} entries.
  w_vec
    .iter()
    .zip_eq(num_rounds.iter())
    .for_each(|(w, num_vars)| assert_eq!(w.p.len(), 1 << num_vars));

  // Challenge for batching the individual sumcheck claims.
  let rho = transcript.squeeze(b"r")?;
  let powers_of_rho = powers::<E>(&rho, num_claims);

  let (claims, u_xs, comms): (Vec<_>, Vec<_>, Vec<_>) =
    u_vec.into_iter().map(|u| (u.e, u.x, u.c)).multiunzip();

  let polys_P: Vec<MultilinearPolynomial<E::Scalar>> = w_vec
    .iter()
    .map(|w| MultilinearPolynomial::new(w.p.clone()))
    .collect();

  // Run the batched evaluation sumcheck; `r` is the common evaluation point
  // and `claims_batch_left` holds each polynomial's evaluation at `r`.
  let (sc_proof_batch, r, claims_batch_left) = sumcheck::SumcheckProof::prove_batch_eval(
    &claims,
    &num_rounds,
    polys_P,
    u_xs,
    &powers_of_rho,
    transcript,
  )?;

  // The verifier re-absorbs these evaluations, so the transcript order here
  // must match `batch_eval_verify` exactly.
  transcript.absorb(b"l", &claims_batch_left.as_slice());

  // Challenge for the final random linear combination of the reduced claims.
  let c = transcript.squeeze(b"c")?;
  let u_joint = PolyEvalInstance::batch_diff_size(&comms, &claims_batch_left, &num_rounds, r, c);
  let w_joint = PolyEvalWitness::batch_diff_size(w_vec, c);

  Ok((u_joint, w_joint, c, sc_proof_batch, claims_batch_left))
}
/// Verifies the reduction of a batch of polynomial evaluation claims to a
/// single joint claim; the verifier-side counterpart of `batch_eval_reduce`.
///
/// On success, returns the joint (batched) evaluation instance and the
/// batching challenge `c`.
///
/// # Errors
/// Returns `NovaError::InvalidSumcheckProof` if the final sumcheck claim does
/// not match the recomputed expected value; propagates transcript/sumcheck
/// errors.
pub fn batch_eval_verify<E: Engine>(
  u_vec: Vec<PolyEvalInstance<E>>,
  transcript: &mut E::TE,
  sc_proof_batch: &sumcheck::SumcheckProof<E>,
  evals_batch: &[E::Scalar],
) -> Result<(PolyEvalInstance<E>, E::Scalar), NovaError> {
  use polys::eq::EqPolynomial;

  let num_claims = u_vec.len();
  assert_eq!(evals_batch.len(), num_claims);

  // Transcript operations below must mirror the prover's exactly.
  let rho = transcript.squeeze(b"r")?;
  let powers_of_rho = powers::<E>(&rho, num_claims);

  let num_rounds = u_vec.iter().map(|u| u.x.len()).collect::<Vec<_>>();
  let num_rounds_max = *num_rounds.iter().max().unwrap();

  let claims = u_vec.iter().map(|u| u.e).collect::<Vec<_>>();

  let (claim_batch_final, r) =
    sc_proof_batch.verify_batch(&claims, &num_rounds, &powers_of_rho, 2, transcript)?;

  // Recompute the expected final claim: for each instance, eq(r_hi, x) times
  // its claimed evaluation, weighted by the matching power of rho; `r_hi` is
  // the suffix of `r` of the instance's own length.
  let claim_batch_final_expected = {
    let evals_r = u_vec.iter().map(|u| {
      let (_, r_hi) = r.split_at(num_rounds_max - u.x.len());
      EqPolynomial::new(r_hi.to_vec()).evaluate(&u.x)
    });

    zip_with!(
      (evals_r, evals_batch.iter(), powers_of_rho.iter()),
      |e_i, p_i, rho_i| e_i * *p_i * rho_i
    )
    .sum()
  };

  if claim_batch_final != claim_batch_final_expected {
    return Err(NovaError::InvalidSumcheckProof);
  }

  transcript.absorb(b"l", &evals_batch);

  // Challenge for the final random linear combination of the reduced claims.
  let c = transcript.squeeze(b"c")?;

  let comms = u_vec.into_iter().map(|u| u.c).collect::<Vec<_>>();
  let u_joint = PolyEvalInstance::batch_diff_size(&comms, evals_batch, &num_rounds, r, c);

  Ok((u_joint, c))
}
/// Computes, for each of the R1CS matrices A, B, C, the vector
/// `rx^T * M` — i.e. for every column `j`, `sum_row rx[row] * M[row][j]` —
/// evaluating the three matrices in parallel via nested `rayon::join`.
///
/// Each result vector has `2 * S.num_vars()` entries (the matrices' column
/// space).
///
/// # Panics
/// Panics if `rx.len() != S.num_cons()`.
pub fn compute_eval_table_sparse<E: Engine>(
  S: &R1CSShape<E>,
  rx: &[E::Scalar],
) -> (Vec<E::Scalar>, Vec<E::Scalar>, Vec<E::Scalar>) {
  assert_eq!(rx.len(), S.num_cons());

  // Accumulates rx-weighted rows of a sparse matrix into a dense per-column
  // table: evals[col] += rx[row] * val for every stored (row, col, val).
  let evaluate = |M: &SparseMatrix<E::Scalar>| -> Vec<E::Scalar> {
    let mut evals: Vec<E::Scalar> = vec![E::Scalar::ZERO; 2 * S.num_vars()];
    for (row_idx, ptrs) in M.indptr.windows(2).enumerate() {
      for (val, col_idx) in M.get_row_unchecked(ptrs.try_into().unwrap()) {
        evals[*col_idx] += rx[row_idx] * val;
      }
    }
    evals
  };

  // Evaluate A concurrently with (B concurrently with C).
  let (A_evals, (B_evals, C_evals)) = rayon::join(
    || evaluate(S.A()),
    || rayon::join(|| evaluate(S.B()), || evaluate(S.C())),
  );

  (A_evals, B_evals, C_evals)
}
#[cfg(test)]
mod batch_invert_tests {
  use super::{batch_invert, batch_invert_serial};
  use ff::Field;
  use rand::rngs::OsRng;
  use rayon::iter::IntoParallelIterator;
  use rayon::prelude::*;

  type F = halo2curves::bn256::Fr;

  // Checks that the parallel batch inversion agrees with the serial reference
  // implementation on random inputs.
  #[test]
  fn test_batch_invert() {
    // Above the serial/parallel threshold (4096), and deliberately not a
    // multiple of the chunk size so the ragged final chunk is exercised.
    let n = (1 << 15) + 5;
    let v = (0..n)
      .into_par_iter()
      .map(|_| F::random(&mut OsRng))
      .collect::<Vec<_>>();

    let res_1 = batch_invert_serial(&v);
    let res_2 = batch_invert(&v);
    assert_eq!(res_1, res_2)
  }
}