#[cfg(not(feature = "std"))]
use alloc::{format, vec::Vec};
use core::ops::RangeInclusive;
use itertools::Itertools;
use crate::field::extension::Extendable;
use crate::fri::proof::{
FriBatchMaskProofTarget, FriBatchMaskQueryTarget, FriChallengesTarget, FriFinalPolysTarget,
FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget, FriQueryStepTarget,
};
use crate::fri::structure::{
FriBatchInfoTarget, FriCoefficientTarget, FriInstanceInfoTarget, FriOpeningExpressionTarget,
FriOpeningsTarget,
};
use crate::fri::{FriConfig, FriFinalPolyLayout, FriParams};
use crate::gates::coset_interpolation::CosetInterpolationGate;
use crate::gates::gate::Gate;
use crate::gates::random_access::RandomAccessGate;
use crate::hash::hash_types::{MerkleCapTarget, RichField};
use crate::iop::ext_target::{flatten_target, ExtensionTarget};
use crate::iop::target::{BoolTarget, Target};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::config::{AlgebraicHasher, GenericConfig};
use crate::util::reducing::ReducingFactorTarget;
use crate::util::{log2_strict, reverse_index_bits_in_place};
use crate::with_context;
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
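/// Computes P'(x^arity) from {P(x*g^i)}_(i=0..arity), where g is a `arity`-th root of unity
/// and P' is the FRI reduced polynomial.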
pub(crate) fn compute_evaluation(
&mut self,
x: Target,
x_index_within_coset_bits: &[BoolTarget],
arity_bits: usize,
evals: &[ExtensionTarget<D>],
beta: ExtensionTarget<D>,
) -> ExtensionTarget<D> {
let arity = 1 << arity_bits;
debug_assert_eq!(evals.len(), arity);
let g = F::primitive_root_of_unity(arity_bits);
let g_inv = g.exp_u64((arity as u64) - 1);
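// The evaluation vector needs to be reordered first.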
let mut evals = evals.to_vec();
reverse_index_bits_in_place(&mut evals);
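// Want `g^(arity - rev_x_index_within_coset)` as in the out-of-circuit version. Compute it
// as `(g^-1)^rev_x_index_within_coset`.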
let start = self.exp_from_bits_const_base(g_inv, x_index_within_coset_bits.iter().rev());
let coset_start = self.mul(start, x);
let interpolation_gate = <CosetInterpolationGate<F, D>>::with_max_degree(
arity_bits,
self.config.max_quotient_degree_factor,
);
self.interpolate_coset(interpolation_gate, coset_start, &evals, beta)
}
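/// Checks that the circuit configuration provides enough wires and routed wires for the
/// `RandomAccessGate` and `CosetInterpolationGate` instances used by the FRI checks at the
/// given maximum arity.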
pub(crate) fn check_recursion_config(&self, max_fri_arity_bits: usize) {
let random_access = RandomAccessGate::<F, D>::new_from_config(
&self.config,
max_fri_arity_bits.max(self.config.fri_config.cap_height),
);
let interpolation_gate = CosetInterpolationGate::<F, D>::with_max_degree(
max_fri_arity_bits,
self.config.max_quotient_degree_factor,
);
let interpolation_wires = interpolation_gate.num_wires();
let interpolation_routed_wires = interpolation_gate.num_routed_wires();
let min_wires = random_access.num_wires().max(interpolation_wires);
let min_routed_wires = random_access
.num_routed_wires()
.max(interpolation_routed_wires);
assert!(
self.config.num_wires >= min_wires,
"To efficiently perform FRI checks with an arity of 2^{}, at least {} wires are needed. Consider reducing arity.",
max_fri_arity_bits,
min_wires
);
assert!(
self.config.num_routed_wires >= min_routed_wires,
"To efficiently perform FRI checks with an arity of 2^{}, at least {} routed wires are needed. Consider reducing arity.",
max_fri_arity_bits,
min_routed_wires
);
}
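/// Asserts that the FRI proof-of-work response has the number of leading zeros required by
/// the config, adjusted for the gap between 64 bits and the field's bit length.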
pub(crate) fn fri_verify_proof_of_work(
&mut self,
fri_pow_response: Target,
config: &FriConfig,
) {
self.assert_leading_zeros(
fri_pow_response,
config.proof_of_work_bits + (64 - F::order().bits()) as u32,
);
}
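/// Verifies a FRI proof in-circuit: checks the proof shape against `params`, the
/// proof-of-work witness, and every query round.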
pub fn verify_fri_proof<C: GenericConfig<D, F = F>>(
&mut self,
instance: &FriInstanceInfoTarget<F, D>,
openings: &FriOpeningsTarget<D>,
challenges: &FriChallengesTarget<D>,
initial_merkle_caps: &[MerkleCapTarget],
proof: &FriProofTarget<D>,
params: &FriParams,
) where
C::Hasher: AlgebraicHasher<F>,
{
if let Some(max_arity_bits) = params.max_arity_bits() {
self.check_recursion_config(max_arity_bits);
}
debug_assert_eq!(
params.final_poly_chunks(),
proof.final_polys.chunks.len(),
"Final polynomial has wrong chunk count."
);
debug_assert!(proof
.final_polys
.chunks
.iter()
.all(|chunk| chunk.len() == params.final_poly_len()));
let n = params.lde_size();
with_context!(
self,
"check PoW",
self.fri_verify_proof_of_work(challenges.fri_pow_response, &params.config)
);
debug_assert_eq!(
params.config.num_query_rounds,
proof.query_round_proofs.len(),
"Number of query rounds does not match config."
);
let precomputed_reduced_evals = with_context!(
self,
"precompute reduced evaluations",
PrecomputedReducedOpeningsTarget::from_os_and_alpha(
openings,
challenges.fri_alpha,
self
)
);
for (i, round_proof) in proof.query_round_proofs.iter().enumerate() {
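// To minimize noise in our logs, we will only record a context for a single FRI query.
// The very first query will have some extra gates due to constants being registered, so
// the second query is a better representative.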
let level = if i == 1 {
log::Level::Debug
} else {
log::Level::Trace
};
let num_queries = proof.query_round_proofs.len();
with_context!(
self,
level,
&format!("verify one (of {num_queries}) query rounds"),
self.fri_verifier_query_round::<C>(
instance,
challenges,
&precomputed_reduced_evals,
initial_merkle_caps,
proof,
i,
challenges.fri_query_indices[i],
n,
round_proof,
params,
)
);
}
}
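/// Like `verify_fri_proof`, but for circuits that support a range of degrees. The actual
/// degree is supplied in-circuit via `current_degree_bits` and `degree_sub_one_bits_vec`,
/// and every degree from `min_degree_bits_to_support` up to `params.degree_bits` is handled.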
pub fn verify_fri_proof_with_multiple_degree_bits<C: GenericConfig<D, F = F>>(
&mut self,
instance: &FriInstanceInfoTarget<F, D>,
openings: &FriOpeningsTarget<D>,
challenges: &FriChallengesTarget<D>,
initial_merkle_caps: &[MerkleCapTarget],
proof: &FriProofTarget<D>,
params: &FriParams,
current_degree_bits: Target,
degree_sub_one_bits_vec: &[BoolTarget],
min_degree_bits_to_support: usize,
) where
C::Hasher: AlgebraicHasher<F>,
{
if let Some(max_arity_bits) = params.max_arity_bits() {
self.check_recursion_config(max_arity_bits);
}
debug_assert_eq!(
params.final_poly_chunks(),
proof.final_polys.chunks.len(),
"Final polynomial has wrong chunk count."
);
debug_assert!(proof
.final_polys
.chunks
.iter()
.all(|chunk| chunk.len() == params.final_poly_len()));
let log_n = params.config.rate_bits + params.degree_bits;
let mut current_log_n = self.constant(F::from_canonical_usize(params.config.rate_bits));
current_log_n = self.add(current_log_n, current_degree_bits);
let min_log_n_to_support = params.config.rate_bits + min_degree_bits_to_support;
with_context!(
self,
"check PoW",
self.fri_verify_proof_of_work(challenges.fri_pow_response, &params.config)
);
debug_assert_eq!(
params.config.num_query_rounds,
proof.query_round_proofs.len(),
"Number of query rounds does not match config."
);
let precomputed_reduced_evals = with_context!(
self,
"precompute reduced evaluations",
PrecomputedReducedOpeningsTarget::from_os_and_alpha(
openings,
challenges.fri_alpha,
self
)
);
for (i, round_proof) in proof.query_round_proofs.iter().enumerate() {
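// To minimize noise in our logs, we will only record a context for a single FRI query.
// The very first query will have some extra gates due to constants being registered, so
// the second query is a better representative.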
let level = if i == 1 {
log::Level::Debug
} else {
log::Level::Trace
};
let num_queries = proof.query_round_proofs.len();
with_context!(
self,
level,
&format!("verify one (of {num_queries}) query rounds"),
self.fri_verifier_query_round_with_multiple_degree_bits::<C>(
instance,
challenges,
&precomputed_reduced_evals,
initial_merkle_caps,
proof,
i,
challenges.fri_query_indices[i],
min_log_n_to_support..=log_n,
current_log_n,
degree_sub_one_bits_vec,
round_proof,
params,
)
);
}
}
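/// Verifies the Merkle proofs for the openings of each initial (pre-FRI) oracle at the
/// queried index.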
fn fri_verify_initial_proof<H: AlgebraicHasher<F>>(
&mut self,
x_index_bits: &[BoolTarget],
proof: &FriInitialTreeProofTarget,
initial_merkle_caps: &[MerkleCapTarget],
cap_index: Target,
) {
for (i, ((evals, merkle_proof), cap)) in proof
.evals_proofs
.iter()
.zip(initial_merkle_caps)
.enumerate()
{
with_context!(
self,
&format!("verify {i}'th initial Merkle proof"),
self.verify_merkle_proof_to_cap_with_cap_index::<H>(
evals.clone(),
x_index_bits,
cap_index,
cap,
merkle_proof
)
);
}
}
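/// Variant of `fri_verify_initial_proof` for circuits supporting multiple degrees: the
/// Merkle proof height is selected in-circuit via `log_n_range` and `n_index`.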
fn fri_verify_initial_proof_with_multiple_degree_bits<H: AlgebraicHasher<F>>(
&mut self,
x_index_bits: &[BoolTarget],
log_n_range: RangeInclusive<usize>,
n_index: Target,
proof: &FriInitialTreeProofTarget,
initial_merkle_caps: &[MerkleCapTarget],
cap_index: Target,
) {
let one = self.one();
for (i, ((evals, merkle_proof), cap)) in proof
.evals_proofs
.iter()
.zip(initial_merkle_caps)
.enumerate()
{
with_context!(
self,
&format!("verify {i}'th initial Merkle proof"),
self.verify_merkle_proof_to_cap_with_cap_indices::<H>(
one,
evals.clone(),
x_index_bits,
log_n_range.clone(),
n_index,
cap_index,
cap,
merkle_proof
)
);
}
}
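/// Evaluates an opening expression: a linear combination of oracle evaluations from the
/// initial tree proof, with coefficients that are constants or powers of `point`.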
pub(crate) fn eval_opening_expression_target(
&mut self,
instance: &FriInstanceInfoTarget<F, D>,
expression: &FriOpeningExpressionTarget<F, D>,
proof: &FriInitialTreeProofTarget,
point: ExtensionTarget<D>,
params: &FriParams,
) -> ExtensionTarget<D> {
let mut point_power_cache = Vec::new();
self.eval_opening_expression_target_with_point_powers(
instance,
expression,
proof,
point,
params,
&mut point_power_cache,
)
}
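/// Returns `point^power`, reusing a cached value if this power was already computed.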
fn cached_point_power_target(
&mut self,
point: ExtensionTarget<D>,
power: usize,
point_power_cache: &mut Vec<(usize, ExtensionTarget<D>)>,
) -> ExtensionTarget<D> {
if let Some((_, cached_value)) = point_power_cache
.iter()
.find(|(cached_exponent, _)| *cached_exponent == power)
{
*cached_value
} else {
let power_value = self.exp_u64_extension(point, power as u64);
point_power_cache.push((power, power_value));
power_value
}
}
fn eval_opening_expression_target_with_point_powers(
&mut self,
instance: &FriInstanceInfoTarget<F, D>,
expression: &FriOpeningExpressionTarget<F, D>,
proof: &FriInitialTreeProofTarget,
point: ExtensionTarget<D>,
params: &FriParams,
point_power_cache: &mut Vec<(usize, ExtensionTarget<D>)>,
) -> ExtensionTarget<D> {
let terms = expression
.terms
.iter()
.map(|term| {
let coefficient = match &term.coefficient {
FriCoefficientTarget::One => self.one_extension(),
FriCoefficientTarget::PointPower(power) => {
self.cached_point_power_target(point, *power, point_power_cache)
}
FriCoefficientTarget::Constant(constant) => self.constant_extension(*constant),
};
let poly_blinding = instance.oracles[term.polynomial.oracle_index].blinding;
let salted = params.leaf_hiding && poly_blinding;
let raw_eval = proof.unsalted_eval(
term.polynomial.oracle_index,
term.polynomial.polynomial_index,
salted,
);
let raw_eval_ext = self.convert_to_ext(raw_eval);
self.mul_extension(coefficient, raw_eval_ext)
})
.collect_vec();
self.add_many_extension(&terms)
}
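/// Combines evaluations of the final polynomial chunks into one evaluation. In the split
/// layout, chunk `i` is weighted by `point^(i * 2^chunk_degree_bits)`.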
pub(crate) fn combine_final_poly_chunks_target(
&mut self,
layout: &FriFinalPolyLayout,
values: &[ExtensionTarget<D>],
point: ExtensionTarget<D>,
) -> ExtensionTarget<D> {
match layout {
FriFinalPolyLayout::Single => values[0],
FriFinalPolyLayout::Split {
chunk_degree_bits,
chunks,
} => {
debug_assert_eq!(*chunks, values.len());
let point_stride = self.exp_power_of_2_extension(point, *chunk_degree_bits);
let mut weight = self.one_extension();
let mut sum = self.zero_extension();
for value in values {
let weighted_value = self.mul_extension(weight, *value);
sum = self.add_extension(sum, weighted_value);
weight = self.mul_extension(weight, point_stride);
}
sum
}
}
}
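/// Evaluates the batch mask at the query point by combining its chunk values according to
/// the batch mask layout.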
pub(crate) fn eval_batch_mask_at_query_point_target(
&mut self,
query: &FriBatchMaskQueryTarget<D>,
subgroup_x: ExtensionTarget<D>,
params: &FriParams,
) -> ExtensionTarget<D> {
self.combine_final_poly_chunks_target(
&params.batch_mask_layout(),
&query.values,
subgroup_x,
)
}
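/// Adds the batch mask evaluation, if present, to the expected unmasked final evaluation.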
pub(crate) fn eval_masked_final_at_query_point_target(
&mut self,
expected_unmasked_final: ExtensionTarget<D>,
batch_mask_eval: Option<ExtensionTarget<D>>,
) -> ExtensionTarget<D> {
if let Some(batch_mask_eval) = batch_mask_eval {
self.add_extension(expected_unmasked_final, batch_mask_eval)
} else {
expected_unmasked_final
}
}
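/// Evaluates each final polynomial chunk at `point` and combines the results according to
/// the final polynomial layout.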
pub(crate) fn eval_final_polys_at_point_target(
&mut self,
final_polys: &FriFinalPolysTarget<D>,
point: ExtensionTarget<D>,
params: &FriParams,
) -> ExtensionTarget<D> {
let values = final_polys
.chunks
.iter()
.map(|chunk| chunk.eval(self, point))
.collect_vec();
self.combine_final_poly_chunks_target(&params.final_poly_layout, &values, point)
}
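/// Combines the initial oracle openings at the query point into a single evaluation of the
/// first FRI codeword, folding each batch with powers of `alpha` and dividing out the
/// corresponding opening point.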
fn fri_combine_initial(
&mut self,
instance: &FriInstanceInfoTarget<F, D>,
proof: &FriInitialTreeProofTarget,
alpha: ExtensionTarget<D>,
subgroup_x: Target,
precomputed_reduced_evals: &PrecomputedReducedOpeningsTarget<D>,
params: &FriParams,
) -> ExtensionTarget<D> {
assert!(D > 1, "Not implemented for D=1.");
let degree_log = params.degree_bits;
debug_assert_eq!(
degree_log,
params.config.cap_height + proof.evals_proofs[0].1.siblings.len()
- params.config.rate_bits
);
let subgroup_x = self.convert_to_ext(subgroup_x);
let mut alpha = ReducingFactorTarget::new(alpha);
let mut sum = self.zero_extension();
for (batch, reduced_openings) in instance
.batches
.iter()
.zip(&precomputed_reduced_evals.reduced_openings_at_point)
{
let FriBatchInfoTarget { point, openings } = batch;
let mut point_power_cache = Vec::new();
let evals = openings
.iter()
.map(|expression| {
self.eval_opening_expression_target_with_point_powers(
instance,
expression,
proof,
*point,
params,
&mut point_power_cache,
)
})
.collect_vec();
let reduced_evals = alpha.reduce(&evals, self);
let numerator = self.sub_extension(reduced_evals, *reduced_openings);
let denominator = self.sub_extension(subgroup_x, *point);
sum = alpha.shift(sum, self);
sum = self.div_add_extension(numerator, denominator, sum);
}
sum
}
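/// Verifies a single FRI query round: the initial Merkle proofs, the consistency and Merkle
/// proof of each reduction step, and the final polynomial evaluation.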
fn fri_verifier_query_round<C: GenericConfig<D, F = F>>(
&mut self,
instance: &FriInstanceInfoTarget<F, D>,
challenges: &FriChallengesTarget<D>,
precomputed_reduced_evals: &PrecomputedReducedOpeningsTarget<D>,
initial_merkle_caps: &[MerkleCapTarget],
proof: &FriProofTarget<D>,
query_round_index: usize,
x_index: Target,
n: usize,
round_proof: &FriQueryRoundTarget<D>,
params: &FriParams,
) where
C::Hasher: AlgebraicHasher<F>,
{
let n_log = log2_strict(n);
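// Note that this `low_bits` decomposition permits non-canonical binary encodings. Here we
// verify that this has a negligible impact on soundness error.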
Self::assert_noncanonical_indices_ok(&params.config);
let mut x_index_bits = self.low_bits(x_index, n_log, F::BITS);
let cap_index =
self.le_sum(x_index_bits[x_index_bits.len() - params.config.cap_height..].iter());
with_context!(
self,
"check FRI initial proof",
self.fri_verify_initial_proof::<C::Hasher>(
&x_index_bits,
&round_proof.initial_trees_proof,
initial_merkle_caps,
cap_index
)
);
let mut subgroup_x = with_context!(self, "compute x from its index", {
let g = self.constant(F::coset_shift());
let phi = F::primitive_root_of_unity(n_log);
let phi = self.exp_from_bits_const_base(phi, x_index_bits.iter().rev());
self.mul(g, phi)
});
let expected_unmasked_final = with_context!(
self,
"combine initial oracles",
self.fri_combine_initial(
instance,
&round_proof.initial_trees_proof,
challenges.fri_alpha,
subgroup_x,
precomputed_reduced_evals,
params,
)
);
let batch_mask_eval = if let Some(batch_mask_proof) = &proof.batch_mask_proof {
let query_opening = &batch_mask_proof.query_openings[query_round_index];
with_context!(
self,
"verify batch-mask Merkle proof",
self.verify_merkle_proof_to_cap_with_cap_index::<C::Hasher>(
flatten_target(&query_opening.values),
&x_index_bits,
cap_index,
&batch_mask_proof.cap,
&query_opening.merkle_proof,
)
);
let subgroup_x_ext = self.convert_to_ext(subgroup_x);
Some(self.eval_batch_mask_at_query_point_target(query_opening, subgroup_x_ext, params))
} else {
None
};
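// old_eval is the last derived evaluation; it will be checked for consistency with its
// committed "parent" value in the next iteration.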
let mut old_eval =
self.eval_masked_final_at_query_point_target(expected_unmasked_final, batch_mask_eval);
for (i, &arity_bits) in params.reduction_arity_bits.iter().enumerate() {
let evals = &round_proof.steps[i].evals;
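// Split x_index into the index of the coset x is in, and the index of x within that coset.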
let coset_index_bits = x_index_bits[arity_bits..].to_vec();
let x_index_within_coset_bits = &x_index_bits[..arity_bits];
let x_index_within_coset = self.le_sum(x_index_within_coset_bits.iter());
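// Check consistency with our old evaluation from the previous round.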
let new_eval = self.random_access_extension(x_index_within_coset, evals.clone());
self.connect_extension(new_eval, old_eval);
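// Infer P(y) from {P(x)}_{x^arity=y}.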
old_eval = with_context!(
self,
"infer evaluation using interpolation",
self.compute_evaluation(
subgroup_x,
x_index_within_coset_bits,
arity_bits,
evals,
challenges.fri_betas[i],
)
);
with_context!(
self,
"verify FRI round Merkle proof.",
self.verify_merkle_proof_to_cap_with_cap_index::<C::Hasher>(
flatten_target(evals),
&coset_index_bits,
cap_index,
&proof.commit_phase_merkle_caps[i],
&round_proof.steps[i].merkle_proof,
)
);
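// Update the point x to x^arity.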
subgroup_x = self.exp_power_of_2(subgroup_x, arity_bits);
x_index_bits = coset_index_bits;
}
let eval = with_context!(
self,
&format!(
"evaluate {} final polynomial chunks",
proof.final_polys.chunks.len()
),
{
let subgroup_x_ext = self.convert_to_ext(subgroup_x);
self.eval_final_polys_at_point_target(&proof.final_polys, subgroup_x_ext, params)
}
);
self.connect_extension(eval, old_eval);
}
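/// Variant of `fri_verifier_query_round` for circuits supporting multiple degrees: each
/// reduction step is gated by a bit of `degree_sub_one_bits_vec`, so steps beyond the
/// actual degree become no-ops.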
fn fri_verifier_query_round_with_multiple_degree_bits<C: GenericConfig<D, F = F>>(
&mut self,
instance: &FriInstanceInfoTarget<F, D>,
challenges: &FriChallengesTarget<D>,
precomputed_reduced_evals: &PrecomputedReducedOpeningsTarget<D>,
initial_merkle_caps: &[MerkleCapTarget],
proof: &FriProofTarget<D>,
query_round_index: usize,
x_index: Target,
log_n_range: RangeInclusive<usize>,
log_n: Target,
degree_sub_one_bits_vec: &[BoolTarget],
round_proof: &FriQueryRoundTarget<D>,
params: &FriParams,
) where
C::Hasher: AlgebraicHasher<F>,
{
assert!(*log_n_range.start() > params.config.cap_height);
let n_index = {
let min_log_n = self.constant(F::from_canonical_usize(*log_n_range.start()));
self.sub(log_n, min_log_n)
};
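// Note that this `low_bits` decomposition permits non-canonical binary encodings. Here we
// verify that this has a negligible impact on soundness error.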
Self::assert_noncanonical_indices_ok(&params.config);
let mut x_index_bits = self.low_bits(x_index, *log_n_range.end(), F::BITS);
let cap_indices: Vec<_> = log_n_range
.clone()
.map(|n| {
let slice_start = n - params.config.cap_height;
self.le_sum(x_index_bits[slice_start..n].iter())
})
.collect();
let cap_index = self.random_access(n_index, cap_indices);
with_context!(
self,
"check FRI initial proof",
self.fri_verify_initial_proof_with_multiple_degree_bits::<C::Hasher>(
&x_index_bits,
log_n_range.clone(),
n_index,
&round_proof.initial_trees_proof,
initial_merkle_caps,
cap_index,
)
);
let g = self.constant(F::coset_shift());
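// Compute the query point x for every supported domain size, then select the one matching
// the actual size.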
let subgroup_x_vec: Vec<_> = log_n_range
.clone()
.map(|n| {
with_context!(self, "compute x from its index", {
let phi = F::primitive_root_of_unity(n);
let phi = self.exp_from_bits_const_base(phi, x_index_bits[..n].iter().rev());
self.mul(g, phi)
})
})
.collect();
let mut subgroup_x = self.random_access(n_index, subgroup_x_vec);
let expected_unmasked_final = with_context!(
self,
"combine initial oracles",
self.fri_combine_initial(
instance,
&round_proof.initial_trees_proof,
challenges.fri_alpha,
subgroup_x,
precomputed_reduced_evals,
params,
)
);
let batch_mask_eval = if let Some(batch_mask_proof) = &proof.batch_mask_proof {
let one = self.one();
let query_opening = &batch_mask_proof.query_openings[query_round_index];
with_context!(
self,
"verify batch-mask Merkle proof",
self.verify_merkle_proof_to_cap_with_cap_indices::<C::Hasher>(
one,
flatten_target(&query_opening.values),
&x_index_bits,
log_n_range.clone(),
n_index,
cap_index,
&batch_mask_proof.cap,
&query_opening.merkle_proof,
)
);
let subgroup_x_ext = self.convert_to_ext(subgroup_x);
Some(self.eval_batch_mask_at_query_point_target(query_opening, subgroup_x_ext, params))
} else {
None
};
let mut old_eval =
self.eval_masked_final_at_query_point_target(expected_unmasked_final, batch_mask_eval);
// Index into `degree_sub_one_bits_vec` of the first reduction step's gating bit: the
// vector's length minus the total arity bits consumed by all reduction steps.
let mut index_in_degree_sub_one_bits_vec =
degree_sub_one_bits_vec.len() - params.reduction_arity_bits.iter().sum::<usize>();
for (i, &arity_bits) in params.reduction_arity_bits.iter().enumerate() {
let evals = &round_proof.steps[i].evals;
let coset_index_bits = x_index_bits[arity_bits..].to_vec();
let x_index_within_coset_bits = &x_index_bits[..arity_bits];
let x_index_within_coset = self.le_sum(x_index_within_coset_bits.iter());
let new_eval = self.random_access_extension(x_index_within_coset, evals.clone());
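// Check consistency with the previous evaluation, but only when this reduction step is
// active for the actual degree.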
let step_active = degree_sub_one_bits_vec[index_in_degree_sub_one_bits_vec];
self.conditional_assert_eq_ext(step_active.target, new_eval, old_eval);
let eval = with_context!(
self,
"infer evaluation using interpolation",
self.compute_evaluation(
subgroup_x,
x_index_within_coset_bits,
arity_bits,
evals,
challenges.fri_betas[i],
)
);
old_eval = self.select_ext(step_active, eval, old_eval);
with_context!(
self,
"verify FRI round Merkle proof.",
self.verify_merkle_proof_to_cap_with_cap_indices::<C::Hasher>(
step_active.target,
flatten_target(evals),
&coset_index_bits,
log_n_range.clone(),
n_index,
cap_index,
&proof.commit_phase_merkle_caps[i],
&round_proof.steps[i].merkle_proof,
)
);
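// Update the point x to x^arity, but only when this step is active.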
let subgroup_x_cur = self.exp_power_of_2(subgroup_x, arity_bits);
subgroup_x = self.select(step_active, subgroup_x_cur, subgroup_x);
x_index_bits = coset_index_bits;
index_in_degree_sub_one_bits_vec += arity_bits;
}
let eval = with_context!(
self,
&format!(
"evaluate {} final polynomial chunks",
proof.final_polys.chunks.len()
),
{
let subgroup_x_ext = self.convert_to_ext(subgroup_x);
self.eval_final_polys_at_point_target(&proof.final_polys, subgroup_x_ext, params)
}
);
self.connect_extension(eval, old_eval);
}
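/// We decompose FRI query indices into bits without verifying that the decomposition given
/// by the prover is canonical. In particular, if `x_index < 2^field_bits - p`, then the
/// prover could supply the binary encoding of either `x_index` or `x_index + p`, since the
/// two are congruent mod `p`. However, this only occurs with probability
///     p_ambiguous = (2^field_bits - p) / p
/// which is small for the fields used in practice, so we assert that it is negligible
/// relative to the FRI query error.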
pub(crate) fn assert_noncanonical_indices_ok(config: &FriConfig) {
let num_ambiguous_elems = u64::MAX - F::ORDER + 1;
let query_error = config.rate();
let p_ambiguous = (num_ambiguous_elems as f64) / (F::ORDER as f64);
assert!(p_ambiguous < query_error * 1e-5,
"A non-negligible portion of field elements are in the range that permits non-canonical encodings. Need to do more analysis or enforce canonical encodings.");
}
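/// Adds virtual targets for a FRI proof with the given number of leaves per oracle.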
pub fn add_virtual_fri_proof(
&mut self,
num_leaves_per_oracle: &[usize],
params: &FriParams,
) -> FriProofTarget<D> {
let cap_height = params.config.cap_height;
let num_queries = params.config.num_query_rounds;
let commit_phase_merkle_caps = (0..params.reduction_arity_bits.len())
.map(|_| self.add_virtual_cap(cap_height))
.collect();
let batch_mask_proof = params
.batch_masking
.as_ref()
.map(|_| self.add_virtual_fri_batch_mask_proof(params));
let query_round_proofs = (0..num_queries)
.map(|_| self.add_virtual_fri_query(num_leaves_per_oracle, params))
.collect();
let final_polys = FriFinalPolysTarget {
chunks: (0..params.final_poly_chunks())
.map(|_| self.add_virtual_poly_coeff_ext(params.final_poly_len()))
.collect(),
};
let pow_witness = self.add_virtual_target();
FriProofTarget {
commit_phase_merkle_caps,
batch_mask_proof,
query_round_proofs,
final_polys,
pow_witness,
}
}
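/// Adds virtual targets for a batch mask proof: a Merkle cap plus one opening (chunk values
/// and Merkle proof) per query round.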
fn add_virtual_fri_batch_mask_proof(
&mut self,
params: &FriParams,
) -> FriBatchMaskProofTarget<D> {
let cap_height = params.config.cap_height;
let merkle_proof_len = params.lde_bits() - cap_height;
FriBatchMaskProofTarget {
cap: self.add_virtual_cap(cap_height),
query_openings: (0..params.config.num_query_rounds)
.map(|_| FriBatchMaskQueryTarget {
values: self.add_virtual_extension_targets(params.batch_mask_layout().chunks()),
merkle_proof: self.add_virtual_merkle_proof(merkle_proof_len),
})
.collect(),
}
}
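/// Adds virtual targets for a single FRI query round, shrinking the Merkle proof length by
/// `arity_bits` at each reduction step.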
fn add_virtual_fri_query(
&mut self,
num_leaves_per_oracle: &[usize],
params: &FriParams,
) -> FriQueryRoundTarget<D> {
let cap_height = params.config.cap_height;
assert!(params.lde_bits() >= cap_height);
let mut merkle_proof_len = params.lde_bits() - cap_height;
let initial_trees_proof =
self.add_virtual_fri_initial_trees_proof(num_leaves_per_oracle, merkle_proof_len);
let mut steps = Vec::with_capacity(params.reduction_arity_bits.len());
for &arity_bits in &params.reduction_arity_bits {
assert!(merkle_proof_len >= arity_bits);
merkle_proof_len -= arity_bits;
steps.push(self.add_virtual_fri_query_step(arity_bits, merkle_proof_len));
}
FriQueryRoundTarget {
initial_trees_proof,
steps,
}
}
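/// Adds virtual targets for the initial tree openings: one leaf vector and Merkle proof per
/// oracle.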
fn add_virtual_fri_initial_trees_proof(
&mut self,
num_leaves_per_oracle: &[usize],
initial_merkle_proof_len: usize,
) -> FriInitialTreeProofTarget {
let evals_proofs = num_leaves_per_oracle
.iter()
.map(|&num_oracle_leaves| {
let leaves = self.add_virtual_targets(num_oracle_leaves);
let merkle_proof = self.add_virtual_merkle_proof(initial_merkle_proof_len);
(leaves, merkle_proof)
})
.collect();
FriInitialTreeProofTarget { evals_proofs }
}
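/// Adds virtual targets for one FRI reduction step: `2^arity_bits` evaluations and a Merkle
/// proof.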
fn add_virtual_fri_query_step(
&mut self,
arity_bits: usize,
merkle_proof_len: usize,
) -> FriQueryStepTarget<D> {
FriQueryStepTarget {
evals: self.add_virtual_extension_targets(1 << arity_bits),
merkle_proof: self.add_virtual_merkle_proof(merkle_proof_len),
}
}
}
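/// For each opening point, holds the reduced (by `alpha`) evaluations of each polynomial
/// that's opened at that point.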
#[derive(Clone)]
pub(crate) struct PrecomputedReducedOpeningsTarget<const D: usize> {
pub(crate) reduced_openings_at_point: Vec<ExtensionTarget<D>>,
}
impl<const D: usize> PrecomputedReducedOpeningsTarget<D> {
pub(crate) fn from_os_and_alpha<F: RichField + Extendable<D>>(
openings: &FriOpeningsTarget<D>,
alpha: ExtensionTarget<D>,
builder: &mut CircuitBuilder<F, D>,
) -> Self {
let reduced_openings_at_point = openings
.batches
.iter()
.map(|batch| ReducingFactorTarget::new(alpha).reduce(&batch.values, builder))
.collect();
Self {
reduced_openings_at_point,
}
}
}