// winter_air/options.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
5
use alloc::vec::Vec;
use core::cmp;

use fri::FriOptions;
use math::{FieldElement, StarkField, ToElements};
use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable};
12
// CONSTANTS
// ================================================================================================

// most of these constants are set so that values fit into a u8 integer.

// Upper bound on the number of queries; bounded so that the count fits into a u8.
const MAX_NUM_QUERIES: usize = 255;

// Allowed range for the LDE blowup factor; the value must also be a power of two.
const MIN_BLOWUP_FACTOR: usize = 2;
const MAX_BLOWUP_FACTOR: usize = 128;

// Upper bound on the query-seed grinding (proof-of-work) factor.
const MAX_GRINDING_FACTOR: u32 = 32;

// Allowed range for the FRI folding factor; the value must also be a power of two.
const FRI_MIN_FOLDING_FACTOR: usize = 2;
const FRI_MAX_FOLDING_FACTOR: usize = 16;
// Maximum degree of the FRI remainder polynomial; bounded so that it fits into a u8.
const FRI_MAX_REMAINDER_DEGREE: usize = 255;
28
// TYPES AND INTERFACES
// ================================================================================================
31
/// Defines an extension field for the composition polynomial.
///
/// Choice of a field for a composition polynomial may impact proof soundness, and can also have
/// a non-negligible impact on proof generation time and proof size. Specifically, for small
/// fields, security offered by the base field itself may be inadequate or insufficient, and an
/// extension of the base field may need to be used.
///
/// For example, if the size of base field is ~64-bits, a quadratic extension must be used to
/// achieve ~100 bits of soundness, and a cubic extension must be used to achieve 128+ bits
/// of soundness.
///
/// However, increasing extension degree will increase proof generation time and proof size by
/// as much as 50%.
#[repr(u8)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum FieldExtension {
    /// Composition polynomial is constructed in the base field.
    None = 1,
    /// Composition polynomial is constructed in the quadratic extension of the base field.
    Quadratic = 2,
    /// Composition polynomial is constructed in the cubic extension of the base field.
    Cubic = 3,
}
55
/// STARK protocol parameters.
///
/// These parameters have a direct impact on proof soundness, proof generation time, and proof
/// size. Specifically:
///
/// 1. Finite field - proof soundness depends on the size of finite field used by the protocol. This
///    means, that for small fields (e.g. smaller than ~128 bits), field extensions must be used to
///    achieve adequate security. And even for ~128 bit fields, to achieve security over 100 bits, a
///    field extension may be required.
/// 2. Number of queries - higher values increase proof soundness, but also increase proof size.
/// 3. Blowup factor - higher values increase proof soundness, but also increase proof generation
///    time and proof size. However, higher blowup factors require fewer queries for the same
///    security level. Thus, it is frequently possible to increase blowup factor and at the same
///    time decrease the number of queries in such a way that the proofs become smaller.
/// 4. Grinding factor - higher values increase proof soundness, but also may increase proof
///    generation time. More precisely, conjectured proof soundness is bounded by `num_queries *
///    log2(blowup_factor) + grinding_factor`.
/// 5. Batching method for constraint composition polynomial - either independent random values per
///    constraint are used in the computation of the constraint composition polynomial or powers of
///    a single random value are used instead. The first type of batching is called `Linear` while
///    the second is called `Algebraic`.
/// 6. Batching method for DEEP polynomial - either independent random values per multi-point
///    quotient are used in the computation of the DEEP polynomial or powers of a single random
///    value are used instead.
///
/// Another important parameter in defining STARK security level, which is not a part of
/// [ProofOptions] is the hash function used in the protocol. The soundness of a STARK proof is
/// limited by the collision resistance of the hash function used by the protocol. For example, if a
/// hash function with 128-bit collision resistance is used, soundness of a STARK proof cannot
/// exceed 128 bits.
///
/// In addition, partition options (see [PartitionOptions]) can be provided to split traces during
/// proving and distribute work across multiple devices. Taking the main segment trace as an
/// example, the prover will split the main segment trace into `num_partitions` parts, and then
/// proceed to hash each part row-wise resulting in `num_partitions` digests per row of the trace.
/// Finally, `num_partitions` digests (per row) are combined into one digest (per row) and at this
/// point a vector commitment scheme can be called. In the case when `num_partitions` is equal to
/// `1` (default) the prover will hash each row in one go producing one digest per row of the trace.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ProofOptions {
    // validated by `new()`: 0 < num_queries <= 255
    num_queries: u8,
    // validated by `new()`: a power of two in [2, 128]
    blowup_factor: u8,
    // validated by `new()`: at most 32
    grinding_factor: u8,
    field_extension: FieldExtension,
    // validated by `new()`: a power of two in [2, 16]
    fri_folding_factor: u8,
    // validated by `new()`: one less than a power of two, at most 255
    fri_remainder_max_degree: u8,
    batching_constraints: BatchingMethod,
    batching_deep: BatchingMethod,
    // defaults to a single partition (no partitioning); set via `with_partitions()`
    partition_options: PartitionOptions,
}
106
// PROOF OPTIONS IMPLEMENTATION
// ================================================================================================
impl ProofOptions {
    // CONSTANTS
    // --------------------------------------------------------------------------------------------

    /// Smallest allowed blowup factor which is currently set to 2.
    ///
    /// The smallest allowed blowup factor for a given computation is derived from degrees of
    /// constraints defined for that computation and may be greater than 2. But no computation may
    /// have a blowup factor smaller than 2.
    pub const MIN_BLOWUP_FACTOR: usize = MIN_BLOWUP_FACTOR;

    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------
    /// Returns a new instance of [ProofOptions] struct constructed from the specified parameters.
    ///
    /// # Panics
    /// Panics if:
    /// - `num_queries` is zero or greater than 255.
    /// - `blowup_factor` is smaller than 2, greater than 128, or is not a power of two.
    /// - `grinding_factor` is greater than 32.
    /// - `fri_folding_factor` is not 2, 4, 8, or 16.
    /// - `fri_remainder_max_degree` is greater than 255 or is not a power of two minus 1.
    #[allow(clippy::too_many_arguments)]
    pub const fn new(
        num_queries: usize,
        blowup_factor: usize,
        grinding_factor: u32,
        field_extension: FieldExtension,
        fri_folding_factor: usize,
        fri_remainder_max_degree: usize,
        batching_constraints: BatchingMethod,
        batching_deep: BatchingMethod,
    ) -> ProofOptions {
        // TODO: return errors instead of panicking
        assert!(num_queries > 0, "number of queries must be greater than 0");
        assert!(num_queries <= MAX_NUM_QUERIES, "number of queries cannot be greater than 255");

        assert!(blowup_factor.is_power_of_two(), "blowup factor must be a power of 2");
        assert!(blowup_factor >= MIN_BLOWUP_FACTOR, "blowup factor cannot be smaller than 2");
        assert!(blowup_factor <= MAX_BLOWUP_FACTOR, "blowup factor cannot be greater than 128");

        assert!(
            grinding_factor <= MAX_GRINDING_FACTOR,
            "grinding factor cannot be greater than 32"
        );

        assert!(fri_folding_factor.is_power_of_two(), "FRI folding factor must be a power of 2");
        assert!(
            fri_folding_factor >= FRI_MIN_FOLDING_FACTOR,
            "FRI folding factor cannot be smaller than 2"
        );
        assert!(
            fri_folding_factor <= FRI_MAX_FOLDING_FACTOR,
            "FRI folding factor cannot be greater than 16"
        );

        assert!(
            (fri_remainder_max_degree + 1).is_power_of_two(),
            "FRI polynomial remainder degree must be one less than a power of two"
        );
        assert!(
            fri_remainder_max_degree <= FRI_MAX_REMAINDER_DEGREE,
            "FRI polynomial remainder degree cannot be greater than 255"
        );

        // all values are range-checked above, so the narrowing casts below cannot truncate
        Self {
            num_queries: num_queries as u8,
            blowup_factor: blowup_factor as u8,
            grinding_factor: grinding_factor as u8,
            field_extension,
            fri_folding_factor: fri_folding_factor as u8,
            fri_remainder_max_degree: fri_remainder_max_degree as u8,
            // partitioning is disabled by default; use `with_partitions()` to enable it
            partition_options: PartitionOptions::new(1, 1),
            batching_constraints,
            batching_deep,
        }
    }

    /// Updates the provided [ProofOptions] instance with the specified partition parameters.
    ///
    /// # Panics
    /// Panics if:
    /// - `num_partitions` is zero or greater than 16.
    /// - `hash_rate` is outside the range accepted by [PartitionOptions::new].
    pub const fn with_partitions(
        mut self,
        num_partitions: usize,
        hash_rate: usize,
    ) -> ProofOptions {
        self.partition_options = PartitionOptions::new(num_partitions, hash_rate);

        self
    }

    // PUBLIC ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns number of queries for a STARK proof.
    ///
    /// This directly impacts proof soundness as each additional query adds roughly
    /// `log2(blowup_factor)` bits of security to a proof. However, each additional query also
    /// increases proof size.
    pub const fn num_queries(&self) -> usize {
        self.num_queries as usize
    }

    /// Returns trace blowup factor for a STARK proof.
    ///
    /// This is the factor by which the execution trace is extended during low-degree extension. It
    /// has a direct impact on proof soundness as each query adds roughly `log2(blowup_factor)`
    /// bits of security to a proof. However, higher blowup factors also increases prover runtime,
    /// and may increase proof size.
    pub const fn blowup_factor(&self) -> usize {
        self.blowup_factor as usize
    }

    /// Returns query seed grinding factor for a STARK proof.
    ///
    /// Grinding applies Proof-of-Work to the query position seed. An honest prover needs to
    /// perform this work only once, while a dishonest prover will need to perform it every time
    /// they try to change a commitment. Thus, higher grinding factor makes it more difficult to
    /// forge a STARK proof. However, setting grinding factor too high (e.g. higher than 20) will
    /// adversely affect prover time.
    pub const fn grinding_factor(&self) -> u32 {
        self.grinding_factor as u32
    }

    /// Specifies whether composition polynomial should be constructed in an extension field
    /// of STARK protocol.
    ///
    /// Using a field extension increases maximum security level of a proof, but also has
    /// non-negligible impact on prover performance.
    pub const fn field_extension(&self) -> FieldExtension {
        self.field_extension
    }

    /// Returns the offset by which the low-degree extension domain is shifted in relation to the
    /// trace domain.
    ///
    /// Currently, this is hard-coded to the primitive element of the underlying base field.
    pub const fn domain_offset<B: StarkField>(&self) -> B {
        B::GENERATOR
    }

    /// Returns options for FRI protocol instantiated with parameters from this proof options.
    pub fn to_fri_options(&self) -> FriOptions {
        let folding_factor = self.fri_folding_factor as usize;
        let remainder_max_degree = self.fri_remainder_max_degree as usize;
        FriOptions::new(self.blowup_factor(), folding_factor, remainder_max_degree)
    }

    /// Returns the [PartitionOptions] used in this instance of proof options.
    pub fn partition_options(&self) -> PartitionOptions {
        self.partition_options
    }

    /// Returns the [BatchingMethod] defining the method used for batching the constraints during
    /// the computation of the constraint composition polynomial.
    ///
    /// Linear batching implies that independently drawn random values per constraint will be used
    /// to do the batching, while Algebraic batching implies that powers of a single random value
    /// are used.
    ///
    /// Depending on other parameters, Algebraic batching may lead to a small reduction in the
    /// security level of the generated proofs, but avoids extra calls to the random oracle
    /// (i.e., hash function).
    pub fn constraint_batching_method(&self) -> BatchingMethod {
        self.batching_constraints
    }

    /// Returns the [BatchingMethod] defining the method used for batching the multi-point
    /// quotients defining the DEEP polynomial.
    ///
    /// Linear batching implies that independently drawn random values per multi-point quotient
    /// will be used to do the batching, while Algebraic batching implies that powers of a single
    /// random value are used.
    ///
    /// Depending on other parameters, Algebraic batching may lead to a small reduction in the
    /// security level of the generated proofs, but avoids extra calls to the random oracle
    /// (i.e., hash function).
    pub fn deep_poly_batching_method(&self) -> BatchingMethod {
        self.batching_deep
    }
}
293
294impl<E: StarkField> ToElements<E> for ProofOptions {
295 /// Encodes these proof options into 3 field elements.
296 fn to_elements(&self) -> Vec<E> {
297 // encode field extension, FRI parameters, and blowup factor into a single field element
298 let mut buf = self.field_extension as u32;
299 buf = (buf << 8) | self.fri_folding_factor as u32;
300 buf = (buf << 8) | self.fri_remainder_max_degree as u32;
301 buf = (buf << 8) | self.blowup_factor as u32;
302
303 vec![E::from(buf), E::from(self.grinding_factor), E::from(self.num_queries)]
304 }
305}
306
impl Serializable for ProofOptions {
    /// Serializes `self` and writes the resulting bytes into the `target`.
    ///
    /// The write order below is the wire format and must mirror the read order in
    /// `Deserializable::read_from`.
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        target.write_u8(self.num_queries);
        target.write_u8(self.blowup_factor);
        target.write_u8(self.grinding_factor);
        target.write(self.field_extension);
        target.write_u8(self.fri_folding_factor);
        target.write_u8(self.fri_remainder_max_degree);
        target.write(self.batching_constraints);
        target.write(self.batching_deep);
        target.write_u8(self.partition_options.num_partitions);
        target.write_u8(self.partition_options.hash_rate);
    }
}
322
impl Deserializable for ProofOptions {
    /// Reads proof options from the specified `source` and returns the result.
    ///
    /// # Errors
    /// Returns an error if a valid proof options could not be read from the specified `source`.
    ///
    /// NOTE(review): out-of-range parameter values are not reported as a [DeserializationError]
    /// here; they trip the asserts in [ProofOptions::new] / [PartitionOptions::new] and panic
    /// instead (see the TODO in `ProofOptions::new`).
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        // the read order below must mirror the write order in `Serializable::write_into`
        let result = ProofOptions::new(
            source.read_u8()? as usize,
            source.read_u8()? as usize,
            source.read_u8()? as u32,
            FieldExtension::read_from(source)?,
            source.read_u8()? as usize,
            source.read_u8()? as usize,
            BatchingMethod::read_from(source)?,
            BatchingMethod::read_from(source)?,
        );
        Ok(result.with_partitions(source.read_u8()? as usize, source.read_u8()? as usize))
    }
}
342
343// FIELD EXTENSION IMPLEMENTATION
344// ================================================================================================
345
346impl FieldExtension {
347 /// Returns `true` if this field extension is set to `None`.
348 pub const fn is_none(&self) -> bool {
349 matches!(self, Self::None)
350 }
351
352 /// Returns extension degree of this field extension.
353 pub const fn degree(&self) -> u32 {
354 match self {
355 Self::None => 1,
356 Self::Quadratic => 2,
357 Self::Cubic => 3,
358 }
359 }
360}
361
impl Serializable for FieldExtension {
    /// Serializes `self` and writes the resulting bytes into the `target`.
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        // the u8 discriminant (1, 2, or 3) is the wire format expected by `read_from`
        target.write_u8(*self as u8);
    }

    /// Returns an estimate of how many bytes are needed to represent self.
    fn get_size_hint(&self) -> usize {
        1
    }
}
373
374impl Deserializable for FieldExtension {
375 /// Reads a field extension enum from the specified `source`.
376 fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
377 match source.read_u8()? {
378 1 => Ok(FieldExtension::None),
379 2 => Ok(FieldExtension::Quadratic),
380 3 => Ok(FieldExtension::Cubic),
381 value => Err(DeserializationError::InvalidValue(format!(
382 "value {value} cannot be deserialized as FieldExtension enum"
383 ))),
384 }
385 }
386}
387
// PARTITION OPTION IMPLEMENTATION
// ================================================================================================

/// Defines the parameters used to calculate partition size when committing to the traces
/// generated during the protocol.
///
/// Using multiple partitions will change how vector commitments are calculated:
/// - Input matrix columns are split into at most num_partitions partitions
/// - For each matrix row, a hash is calculated for each partition separately
/// - The results are merged together by one more hash iteration
///
/// This is especially useful when proving with multiple GPU cards where each device holds
/// a subset of data and allows less data reshuffling when generating commitments.
///
/// Hash_rate parameter is used to find the optimal partition size to minimize the number
/// of hash iterations. It specifies how many field elements are consumed by each hash iteration.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct PartitionOptions {
    // number of partitions to split the trace into; validated by `new()`, 1 means no partitioning
    num_partitions: u8,
    // number of field elements consumed by one hash iteration; validated by `new()`
    hash_rate: u8,
}
409
410impl PartitionOptions {
411 /// Returns a new instance of `[PartitionOptions]`.
412 pub const fn new(num_partitions: usize, hash_rate: usize) -> Self {
413 assert!(num_partitions >= 1, "number of partitions must be greater than or equal to 1");
414 assert!(num_partitions <= 16, "number of partitions must be smaller than or equal to 16");
415
416 assert!(hash_rate >= 1, "hash rate must be greater than or equal to 1");
417 assert!(hash_rate <= 256, "hash rate must be smaller than or equal to 256");
418
419 Self {
420 num_partitions: num_partitions as u8,
421 hash_rate: hash_rate as u8,
422 }
423 }
424
425 /// Returns the size of each partition used when committing to the main and auxiliary traces as
426 /// well as the constraint evaluation trace.
427 /// The returned size is given in terms of number of columns in the field `E`.
428 pub fn partition_size<E: FieldElement>(&self, num_columns: usize) -> usize {
429 if self.num_partitions == 1 {
430 return num_columns;
431 }
432
433 // Don't separate columns that would fit inside one hash iteration. min_partition_size is
434 // the number of `E` elements that can be consumed in one hash iteration.
435 let min_partition_size = self.hash_rate as usize / E::EXTENSION_DEGREE;
436
437 cmp::max(num_columns.div_ceil(self.num_partitions as usize), min_partition_size)
438 }
439
440 /// The actual number of partitions, after the min partition size implied
441 /// by the hash rate is taken into account.
442 pub fn num_partitions<E: FieldElement>(&self, num_columns: usize) -> usize {
443 num_columns.div_ceil(self.partition_size::<E>(num_columns))
444 }
445}
446
447impl Default for PartitionOptions {
448 fn default() -> Self {
449 Self { num_partitions: 1, hash_rate: 1 }
450 }
451}
452
// BATCHING METHOD
// ================================================================================================

/// Represents the type of batching, using randomness, used in the construction of either
/// the constraint composition polynomial or the DEEP composition polynomial.
///
/// There are currently two types of batching supported:
///
/// 1. Linear, also called affine, where the resulting expression is a multivariate polynomial of
///    total degree 1 in each of the random values.
/// 2. Algebraic, also called parametric or curve batching, where the resulting expression is a
///    univariate polynomial in one random value.
///
/// The main difference between the two types is that algebraic batching has low verifier randomness
/// complexity and hence is light on the number of calls to the random oracle. However, this comes
/// at the cost of an increase in the soundness error of the constraint batching step, i.e.,
/// ALI, on the order of log2(C - 1) where C is the number of constraints being batched and
/// an increase in the soundness error of the FRI batching step, on the order of log2(N - 1)
/// where N is the number of code words being batched. Linear batching does not suffer
/// from such a degradation but has linear verifier randomness complexity in the number of terms
/// being batched.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum BatchingMethod {
    /// Each term is multiplied by an independently drawn random value.
    Linear = 0,
    /// Terms are multiplied by successive powers of a single random value.
    Algebraic = 1,
}
480
481impl Serializable for BatchingMethod {
482 /// Serializes `self` and writes the resulting bytes into the `target`.
483 fn write_into<W: ByteWriter>(&self, target: &mut W) {
484 target.write_u8(*self as u8);
485 }
486}
487
impl Deserializable for BatchingMethod {
    /// Reads [BatchingMethod] from the specified `source` and returns the result.
    ///
    /// # Errors
    /// Returns an error if the value does not correspond to a valid [BatchingMethod].
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        // accepts exactly the discriminant values (0, 1) written by `write_into`
        match source.read_u8()? {
            0 => Ok(BatchingMethod::Linear),
            1 => Ok(BatchingMethod::Algebraic),
            n => Err(DeserializationError::InvalidValue(format!(
                "value {n} cannot be deserialized as a BatchingMethod enum"
            ))),
        }
    }
}
503
// TESTS
// ================================================================================================

#[cfg(test)]
mod tests {
    use math::fields::{f64::BaseElement, CubeExtension};
    use utils::{Deserializable, Serializable};

    use super::{FieldExtension, PartitionOptions, ProofOptions, ToElements};
    use crate::options::BatchingMethod;

    // Verifies that `to_elements` packs (extension, folding factor, remainder degree, blowup)
    // into one element, followed by the grinding factor and the number of queries.
    #[test]
    fn proof_options_to_elements() {
        let field_extension = FieldExtension::None;
        let fri_folding_factor = 8;
        let fri_remainder_max_degree = 127;
        let grinding_factor = 20;
        let blowup_factor = 8;
        let num_queries = 30;

        // build the expected packed value from little-endian bytes; this is the byte-reversed
        // equivalent of the big-endian packing performed by `to_elements`
        let ext_fri = u32::from_le_bytes([
            blowup_factor as u8,
            fri_remainder_max_degree,
            fri_folding_factor,
            field_extension as u8,
        ]);
        let expected = vec![
            BaseElement::from(ext_fri),
            BaseElement::from(grinding_factor),
            BaseElement::from(num_queries as u32),
        ];

        let options = ProofOptions::new(
            num_queries,
            blowup_factor,
            grinding_factor,
            field_extension,
            fri_folding_factor as usize,
            fri_remainder_max_degree as usize,
            BatchingMethod::Linear,
            BatchingMethod::Linear,
        );
        assert_eq!(expected, options.to_elements());
    }

    // Verifies partition sizing in the base field (E1) and a cubic extension (E3), where each
    // E3 column consumes 3 base elements of the hash rate.
    #[test]
    fn correct_partition_sizes() {
        type E1 = BaseElement;
        type E3 = CubeExtension<BaseElement>;

        // 7 columns fit into one hash iteration (rate 8), so a single partition is used
        let options = PartitionOptions::new(4, 8);
        let columns = 7;
        assert_eq!(8, options.partition_size::<E1>(columns));
        assert_eq!(1, options.num_partitions::<E1>(columns));

        // 70 columns split evenly across all 4 partitions: ceil(70 / 4) = 18
        let options = PartitionOptions::new(4, 8);
        let columns = 70;
        assert_eq!(18, options.partition_size::<E1>(columns));
        assert_eq!(4, options.num_partitions::<E1>(columns));

        let options = PartitionOptions::new(2, 8);
        let columns = 7;
        assert_eq!(4, options.partition_size::<E3>(columns));
        assert_eq!(2, options.num_partitions::<E3>(columns));

        let options: PartitionOptions = PartitionOptions::new(4, 8);
        let columns = 7;
        assert_eq!(2, options.partition_size::<E3>(columns));
        assert_eq!(4, options.num_partitions::<E3>(columns));

        // don't use all partitions if it would result in sizes smaller than
        // a single hash iteration can handle
        let options: PartitionOptions = PartitionOptions::new(4, 8);
        let columns = 3;
        assert_eq!(2, options.partition_size::<E3>(columns));
        assert_eq!(2, options.num_partitions::<E3>(columns));
    }

    // Verifies that serialization followed by deserialization round-trips all fields.
    #[test]
    fn serialization_proof_options() {
        let field_extension = FieldExtension::Quadratic;
        let fri_folding_factor = 8;
        let fri_remainder_max_degree = 127;
        let grinding_factor = 20;
        let blowup_factor = 8;
        let num_queries = 30;

        let options = ProofOptions::new(
            num_queries,
            blowup_factor,
            grinding_factor,
            field_extension,
            fri_folding_factor as usize,
            fri_remainder_max_degree as usize,
            BatchingMethod::Linear,
            BatchingMethod::Algebraic,
        );

        let options_serialized = options.to_bytes();
        let options_deserialized = ProofOptions::read_from_bytes(&options_serialized).unwrap();

        assert_eq!(options, options_deserialized)
    }
}