// triton_vm/stark.rs

1use std::ops::Mul;
2
3use arbitrary::Arbitrary;
4use arbitrary::Unstructured;
5use itertools::Itertools;
6use itertools::izip;
7use ndarray::Zip;
8use ndarray::prelude::*;
9use num_traits::ConstOne;
10use num_traits::ConstZero;
11use num_traits::Zero;
12use rand::prelude::*;
13use rand::random;
14use rayon::prelude::*;
15use serde::Deserialize;
16use serde::Serialize;
17use twenty_first::math::ntt::intt;
18use twenty_first::math::ntt::ntt;
19use twenty_first::math::traits::FiniteField;
20use twenty_first::math::traits::ModPowU64;
21use twenty_first::math::traits::PrimitiveRootOfUnity;
22use twenty_first::prelude::*;
23
24use crate::aet::AlgebraicExecutionTrace;
25use crate::arithmetic_domain::ArithmeticDomain;
26use crate::challenges::Challenges;
27use crate::error::ProvingError;
28use crate::error::VerificationError;
29use crate::fri;
30use crate::fri::Fri;
31use crate::ndarray_helper;
32use crate::ndarray_helper::COL_AXIS;
33use crate::ndarray_helper::ROW_AXIS;
34use crate::profiler::profiler;
35use crate::proof::Claim;
36use crate::proof::Proof;
37use crate::proof_item::ProofItem;
38use crate::proof_stream::ProofStream;
39use crate::table::QuotientSegments;
40use crate::table::auxiliary_table::Evaluable;
41use crate::table::master_table::MasterAuxTable;
42use crate::table::master_table::MasterMainTable;
43use crate::table::master_table::MasterTable;
44use crate::table::master_table::all_quotients_combined;
45use crate::table::master_table::max_degree_with_origin;
46
/// The number of segments the quotient polynomial is split into.
/// Helps keeping the FRI domain small.
pub const NUM_QUOTIENT_SEGMENTS: usize = air::TARGET_DEGREE as usize;

/// The number of randomizer polynomials over the [extension
/// field](XFieldElement) used in the [`STARK`](Stark). Integral for achieving
/// zero-knowledge in [FRI](Fri).
pub const NUM_RANDOMIZER_POLYNOMIALS: usize = 1;

/// The number of summands making up the combined DEEP codeword: the main &
/// auxiliary combination codeword DEEP'd at the current row, the same codeword
/// DEEP'd at the next row, and the quotient-segments combination codeword
/// DEEP'd at the current row. See [`Prover::prove`].
const NUM_DEEP_CODEWORD_COMPONENTS: usize = 3;
57
/// Parameters for the Zero-Knowledge
/// [Scalable Transparent ARgument of Knowledge (STARK)][stark] for Triton VM.
///
/// The two core methods are [`Stark::prove`] and [`Stark::verify`].
///
/// All fields are public, so instances can be constructed directly via a
/// struct literal.
///
/// [stark]: https://www.iacr.org/archive/crypto2019/116940201/116940201.pdf
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct Stark {
    /// The conjectured security level in bits. Concretely, the system
    /// - is perfectly complete, and
    /// - has soundness error 2^(-security_level).
    pub security_level: usize,

    /// The ratio between the lengths of the randomized trace domain and the
    /// [FRI domain](Stark::fri). Must be a power of 2 for efficiency reasons.
    pub fri_expansion_factor: usize,

    /// The number of randomizers for the execution trace. The trace randomizers
    /// are integral for achieving zero-knowledge. In particular, they
    /// achieve ZK for the (DEEP) ALI part of the zk-STARK.
    //
    // See also [`MasterTable::trace_randomizer_for_column`].
    pub num_trace_randomizers: usize,

    /// The number of collinearity checks to perform in [FRI](Fri).
    //
    // NOTE(review): presumably chosen in concert with `security_level` and
    // `fri_expansion_factor` — confirm against `Stark::fri`.
    pub num_collinearity_checks: usize,
}
85
/// The prover for Triton VM's [zk-STARK](Stark). The core method is
/// [`prove`](Prover::prove). It is probably more convenient to call
/// [`Stark::prove`] directly.
///
/// It is possible to [set the randomness seed][seed] used for deriving all
/// prover randomness. To prevent accidental randomness re-use, the [`Prover`]
/// does not implement [`Clone`].
///
/// [seed]: Prover::set_randomness_seed_which_may_break_zero_knowledge
#[expect(missing_copy_implementations)]
#[derive(Debug, Eq, PartialEq, Arbitrary)]
pub struct Prover {
    // The STARK parameters this prover operates with.
    parameters: Stark,

    /// The seed for all randomness used while [proving][Stark::prove].
    ///
    /// For Triton VM's proofs to be zero knowledge, this seed must be sampled
    /// uniformly at random, and independently of all other input. No
    /// information about it must reach the verifier.
    //
    // NOTE(review): for `StdRng`, this seed is a fixed-size byte array
    // (32 bytes in current `rand`) — confirm against the `rand` version in use.
    randomness_seed: <StdRng as SeedableRng>::Seed,
}
107
/// The verifier for Triton VM's [zk-STARK](Stark). The core method is
/// [`verify`](Verifier::verify). It is probably more convenient to call
/// [`Stark::verify`] directly.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Hash, Serialize, Deserialize, Arbitrary)]
pub struct Verifier {
    // The STARK parameters this verifier checks proofs against.
    parameters: Stark,
}
115
/// The domains used by the [Prover].
///
/// Triton VM's STARK prover & verifier heavily rely on the
/// [Schwartz-Zippel Lemma][lemma]. Therefore, they use polynomials extensively.
/// In order to lower the concrete computational cost, these polynomials are
/// usually represented in value form as opposed to monomial coefficient
/// form, _i.e._, they are usually codewords. To reduce computational costs and
/// memory requirements even further, both the prover and the verifier generally
/// represent a polynomial using the shortest possible codewords. For a
/// polynomial of degree _n_, the shortest possible codeword has _n + 1_
/// entries.
///
/// Using the shortest possible codewords means that the
/// [arithmetic domains](ArithmeticDomain) associated with those codewords have
/// different lengths. Also for performance reasons, the lengths of all
/// arithmetic domains are powers of two. If _n + 1_ is not a power of two, the
/// next power of two is used. This allows efficient interpolation (and
/// evaluation) using the (inverse) [Number Theoretic Transform (NTT)](ntt).
///
/// The most important of these domains are contained in this struct. In some
/// cases, a domain _could_ be replaced by another existing, longer domain to
/// achieve the same functionality in principle. These instances are pointed
/// out in the fields' documentation below.
///
/// [lemma]: https://dl.acm.org/doi/pdf/10.1145/322217.322225
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(crate) struct ProverDomains {
    /// One of the most central domains.
    /// Its length is dictated by the [AET][AlgebraicExecutionTrace]: must be at
    /// least as long as the tallest column in the AET.
    ///
    /// Always exactly half as long as the
    /// [randomized trace domain](ProverDomains::randomized_trace); see
    /// [`ProverDomains::derive`].
    pub trace: ArithmeticDomain,

    /// Derived from the trace domain. Long enough to account for trace
    /// randomizers in addition to the trace. The trace randomizers are required
    /// for zero-knowledge.
    ///
    /// For the current approach to trace randomizers to work, the randomized
    /// trace domain must be _exactly_ twice as long as the trace domain. If the
    /// number of trace randomizers exceeds the length of the trace, this bumps
    /// the length of the trace domain.
    pub randomized_trace: ArithmeticDomain,

    /// This domain is _just_ large enough to perform all the necessary
    /// computations on polynomials. Concretely, the maximal degree of a
    /// polynomial over the quotient domain is at most only slightly larger than
    /// the maximal degree allowed in the STARK proof, and could be equal. This
    /// makes computation for the prover much faster.
    ///
    /// This domain _could_ be replaced by the [FRI domain](ProverDomains::fri)
    /// to achieve the same functionality in principle. However, the FRI domain
    /// is usually longer.
    pub quotient: ArithmeticDomain,

    /// The domain over which [FRI](Fri) operates.
    /// See also [Stark::fri].
    pub fri: ArithmeticDomain,
}
172
173impl ProverDomains {
174    pub fn derive(
175        padded_height: usize,
176        num_trace_randomizers: usize,
177        fri_domain: ArithmeticDomain,
178        max_degree: isize,
179    ) -> Self {
180        let randomized_trace_len =
181            Stark::randomized_trace_len(padded_height, num_trace_randomizers);
182        let randomized_trace_domain = ArithmeticDomain::of_length(randomized_trace_len).unwrap();
183        let trace_domain = randomized_trace_domain.halve().unwrap();
184
185        let max_degree = usize::try_from(max_degree).expect("AIR should constrain the VM");
186        let quotient_domain_length = max_degree.next_power_of_two();
187        let quotient_domain = ArithmeticDomain::of_length(quotient_domain_length)
188            .unwrap()
189            .with_offset(fri_domain.offset);
190
191        Self {
192            trace: trace_domain,
193            randomized_trace: randomized_trace_domain,
194            quotient: quotient_domain,
195            fri: fri_domain,
196        }
197    }
198}
199
200impl Prover {
201    /// A [`Prover`] with a sane [randomness seed][seed].
202    ///
203    /// [seed]: Prover::set_randomness_seed_which_may_break_zero_knowledge
204    pub fn new(parameters: Stark) -> Self {
205        Self {
206            parameters,
207            randomness_seed: random(),
208        }
209    }
210
211    /// Manually set the seed for the randomness used during
212    /// [proving](Self::prove). This makes the generated [proof](Proof)
213    /// deterministic.
214    ///
215    /// # WARNING!
216    ///
217    /// Careless use of this method can break Zero-Knowledge of Triton VM. In
218    /// particular, the [verifier](Stark::verify) must learn nothing about the
219    /// supplied seed, must be unable to influence the supplied seed, and must
220    /// be unable to guess anything about the supplied seed. The latter
221    /// implies that whatever source of randomness is chosen must have
222    /// sufficient entropy.
223    ///
224    /// ### If in doubt, don't use this method.
225    //
226    // Even though this method can be used to disable or cripple one of the
227    // core promises of Triton VM, it is not marked `unsafe` because it is
228    // always _memory_ safe. See also:
229    // https://doc.rust-lang.org/std/keyword.unsafe.html
230    //
231    // The length of the name is intended to discourage use.
232    #[must_use]
233    pub fn set_randomness_seed_which_may_break_zero_knowledge(
234        mut self,
235        seed: <StdRng as SeedableRng>::Seed,
236    ) -> Self {
237        self.randomness_seed = seed;
238        self
239    }
240
    /// See also [`Stark::prove`].
    ///
    /// Produces a [`Proof`] for the given [`Claim`] from the
    /// [execution trace](AlgebraicExecutionTrace), consuming the prover (and
    /// thereby its randomness seed).
    ///
    /// # Errors
    ///
    /// Returns a [`ProvingError`] if any intermediate step fails, for example
    /// deriving the FRI parameters, building a Merkle tree, running FRI, or
    /// converting revealed table rows into their fixed-width form.
    pub fn prove(
        self,
        claim: &Claim,
        aet: &AlgebraicExecutionTrace,
    ) -> Result<Proof, ProvingError> {
        // Bind the claim to the transcript first: every challenge sampled
        // below depends on it through the Fiat-Shamir heuristic.
        profiler!(start "Fiat-Shamir: claim" ("hash"));
        let mut proof_stream = ProofStream::new();
        proof_stream.alter_fiat_shamir_state_with(claim);
        profiler!(stop "Fiat-Shamir: claim");

        profiler!(start "derive additional parameters");
        let padded_height = aet.padded_height();
        let fri = self.parameters.fri(padded_height)?;
        let domains = ProverDomains::derive(
            padded_height,
            self.parameters.num_trace_randomizers,
            fri.domain,
            self.parameters.max_degree(padded_height),
        );
        proof_stream.enqueue(ProofItem::Log2PaddedHeight(padded_height.ilog2()));
        profiler!(stop "derive additional parameters");

        profiler!(start "main tables");
        profiler!(start "create" ("gen"));
        let mut master_main_table = MasterMainTable::new(
            aet,
            domains,
            self.parameters.num_trace_randomizers,
            self.randomness_seed,
        );
        profiler!(stop "create");

        profiler!(start "pad" ("gen"));
        master_main_table.pad();
        profiler!(stop "pad");

        master_main_table.maybe_low_degree_extend_all_columns();

        profiler!(start "Merkle tree");
        let main_merkle_tree = master_main_table.merkle_tree();
        profiler!(stop "Merkle tree");

        // Commit to the main table; its Merkle root seeds the challenges that
        // are required to compute the auxiliary table.
        profiler!(start "Fiat-Shamir" ("hash"));
        proof_stream.enqueue(ProofItem::MerkleRoot(main_merkle_tree.root()));
        let challenges = proof_stream.sample_scalars(Challenges::SAMPLE_COUNT);
        let challenges = Challenges::new(challenges, claim);
        profiler!(stop "Fiat-Shamir");

        profiler!(start "extend" ("gen"));
        let mut master_aux_table = master_main_table.extend(&challenges);
        profiler!(stop "extend");
        profiler!(stop "main tables");

        profiler!(start "aux tables");
        master_aux_table.maybe_low_degree_extend_all_columns();

        profiler!(start "Merkle tree");
        let aux_merkle_tree = master_aux_table.merkle_tree();
        profiler!(stop "Merkle tree");

        profiler!(start "Fiat-Shamir" ("hash"));
        proof_stream.enqueue(ProofItem::MerkleRoot(aux_merkle_tree.root()));

        // Get the weights with which to compress the many quotients into one.
        let quotient_combination_weights =
            proof_stream.sample_scalars(MasterAuxTable::NUM_CONSTRAINTS);
        profiler!(stop "Fiat-Shamir");
        profiler!(stop "aux tables");

        let (fri_domain_quotient_segment_codewords, quotient_segment_polynomials) =
            Self::compute_quotient_segments(
                &mut master_main_table,
                &mut master_aux_table,
                domains.quotient,
                &challenges,
                &quotient_combination_weights,
            );

        // Commit to the quotient segments: hash each FRI-domain row (one
        // extension-field element per segment), then Merkle-ize the digests.
        profiler!(start "hash rows of quotient segments" ("hash"));
        let interpret_xfe_as_bfes = |xfe: &XFieldElement| xfe.coefficients.to_vec();
        let hash_row = |row: ArrayView1<_>| {
            let row_as_bfes = row.iter().map(interpret_xfe_as_bfes).concat();
            Tip5::hash_varlen(&row_as_bfes)
        };
        let quotient_segments_rows = fri_domain_quotient_segment_codewords
            .axis_iter(ROW_AXIS)
            .into_par_iter();
        let fri_domain_quotient_segment_codewords_digests =
            quotient_segments_rows.map(hash_row).collect::<Vec<_>>();
        profiler!(stop "hash rows of quotient segments");
        profiler!(start "Merkle tree" ("hash"));
        let quot_merkle_tree = MerkleTree::par_new(&fri_domain_quotient_segment_codewords_digests)?;
        let quot_merkle_tree_root = quot_merkle_tree.root();
        proof_stream.enqueue(ProofItem::MerkleRoot(quot_merkle_tree_root));
        profiler!(stop "Merkle tree");

        debug_assert_eq!(domains.fri.length, quot_merkle_tree.num_leafs());

        // DEEP-ALI: sample an out-of-domain point α; the "next row" point is
        // ω·α, with ω the trace domain generator.
        profiler!(start "out-of-domain rows");
        let trace_domain_generator = master_main_table.domains().trace.generator;
        let out_of_domain_point_curr_row = proof_stream.sample_scalars(1)[0];
        let out_of_domain_point_next_row = trace_domain_generator * out_of_domain_point_curr_row;

        let ood_main_row = master_main_table.out_of_domain_row(out_of_domain_point_curr_row);
        let ood_main_row = MasterMainTable::try_to_main_row(ood_main_row)?;
        proof_stream.enqueue(ProofItem::OutOfDomainMainRow(Box::new(ood_main_row)));

        let ood_aux_row = master_aux_table.out_of_domain_row(out_of_domain_point_curr_row);
        let ood_aux_row = MasterAuxTable::try_to_aux_row(ood_aux_row)?;
        proof_stream.enqueue(ProofItem::OutOfDomainAuxRow(Box::new(ood_aux_row)));

        let ood_next_main_row = master_main_table.out_of_domain_row(out_of_domain_point_next_row);
        let ood_next_main_row = MasterMainTable::try_to_main_row(ood_next_main_row)?;
        proof_stream.enqueue(ProofItem::OutOfDomainMainRow(Box::new(ood_next_main_row)));

        let ood_next_aux_row = master_aux_table.out_of_domain_row(out_of_domain_point_next_row);
        let ood_next_aux_row = MasterAuxTable::try_to_aux_row(ood_next_aux_row)?;
        proof_stream.enqueue(ProofItem::OutOfDomainAuxRow(Box::new(ood_next_aux_row)));

        // The quotient segments are evaluated at α^k, where k is the number of
        // segments, matching how the segments partition the quotient.
        let out_of_domain_point_curr_row_pow_num_segments =
            out_of_domain_point_curr_row.mod_pow_u32(NUM_QUOTIENT_SEGMENTS as u32);
        let out_of_domain_curr_row_quot_segments = quotient_segment_polynomials
            .map(|poly| poly.evaluate(out_of_domain_point_curr_row_pow_num_segments))
            .to_vec()
            .try_into()
            .unwrap();
        proof_stream.enqueue(ProofItem::OutOfDomainQuotientSegments(
            out_of_domain_curr_row_quot_segments,
        ));
        profiler!(stop "out-of-domain rows");

        profiler!(start "Fiat-Shamir" ("hash"));
        let weights = LinearCombinationWeights::sample(&mut proof_stream);
        profiler!(stop "Fiat-Shamir");

        // Work over the shorter of the FRI and quotient domains to save
        // computation; the result is low-degree extended to the FRI domain at
        // the end if necessary.
        let fri_domain_is_short_domain = domains.fri.length <= domains.quotient.length;
        let short_domain = if fri_domain_is_short_domain {
            domains.fri
        } else {
            domains.quotient
        };

        profiler!(start "linear combination");
        profiler!(start "main" ("CC"));
        let main_combination_poly = master_main_table.weighted_sum_of_columns(weights.main);
        profiler!(stop "main");

        profiler!(start "aux" ("CC"));
        let aux_combination_poly = master_aux_table.weighted_sum_of_columns(weights.aux);
        profiler!(stop "aux");
        let main_and_aux_combination_polynomial = main_combination_poly + aux_combination_poly;
        let main_and_aux_codeword = short_domain.evaluate(&main_and_aux_combination_polynomial);

        profiler!(start "quotient" ("CC"));
        let quotient_segments_combination_polynomial = quotient_segment_polynomials
            .into_iter()
            .zip_eq(weights.quot_segments)
            .fold(Polynomial::zero(), |acc, (poly, w)| acc + poly * w);
        let quotient_segments_combination_codeword =
            short_domain.evaluate(&quotient_segments_combination_polynomial);
        profiler!(stop "quotient");

        profiler!(stop "linear combination");

        profiler!(start "DEEP");
        // There are (at least) two possible ways to perform the DEEP update.
        // 1. The one used here, where main & aux codewords are DEEP'd twice:
        //    once with the out-of-domain point for the current row (i.e., α)
        //    and once using the out-of-domain point for the next row
        //    (i.e., ω·α). The DEEP update's denominator is a degree-1
        //    polynomial in both cases, namely (ω^i - α) and (ω^i - ω·α)
        //    respectively.
        // 2. One where the main & aux codewords are DEEP'd only once, using the
        //    degree-2 polynomial (ω^i - α)·(ω^i - ω·α) as the denominator. This
        //    requires a linear interpolation in the numerator:
        //    b(ω^i) - i((b(α), α) + (b(ω·α), ω·α))(ω^i).
        //
        // In either case, the DEEP'd quotient polynomial is an additional
        // summand for the combination codeword: (q(ω^i) - q(α)) / (ω^i - α).
        // All (three or two) summands are weighted and summed to form the
        // combination codeword. The weights are sampled through the Fiat-Shamir
        // heuristic.
        //
        // Both approaches are sound. The first approach is more efficient, as
        // it requires fewer operations.
        profiler!(start "main&aux curr row");
        let out_of_domain_curr_row_main_and_aux_value =
            main_and_aux_combination_polynomial.evaluate(out_of_domain_point_curr_row);
        let main_and_aux_curr_row_deep_codeword = Self::deep_codeword(
            &main_and_aux_codeword,
            short_domain,
            out_of_domain_point_curr_row,
            out_of_domain_curr_row_main_and_aux_value,
        );
        profiler!(stop "main&aux curr row");

        profiler!(start "main&aux next row");
        let out_of_domain_next_row_main_and_aux_value =
            main_and_aux_combination_polynomial.evaluate(out_of_domain_point_next_row);
        let main_and_aux_next_row_deep_codeword = Self::deep_codeword(
            &main_and_aux_codeword,
            short_domain,
            out_of_domain_point_next_row,
            out_of_domain_next_row_main_and_aux_value,
        );
        profiler!(stop "main&aux next row");

        profiler!(start "segmented quotient");
        let out_of_domain_curr_row_quot_segments_value = quotient_segments_combination_polynomial
            .evaluate(out_of_domain_point_curr_row_pow_num_segments);
        let quotient_segments_curr_row_deep_codeword = Self::deep_codeword(
            &quotient_segments_combination_codeword,
            short_domain,
            out_of_domain_point_curr_row_pow_num_segments,
            out_of_domain_curr_row_quot_segments_value,
        );
        profiler!(stop "segmented quotient");
        profiler!(stop "DEEP");

        // Weight each of the `NUM_DEEP_CODEWORD_COMPONENTS` DEEP codewords and
        // sum them element-wise into one combination codeword.
        profiler!(start "combined DEEP polynomial");
        profiler!(start "sum" ("CC"));
        let deep_codeword = [
            main_and_aux_curr_row_deep_codeword,
            main_and_aux_next_row_deep_codeword,
            quotient_segments_curr_row_deep_codeword,
        ]
        .into_par_iter()
        .zip_eq(weights.deep.as_slice().unwrap())
        .map(|(codeword, &weight)| codeword.into_par_iter().map(|c| c * weight).collect())
        .reduce(
            || vec![XFieldElement::ZERO; short_domain.length],
            |left, right| left.into_iter().zip(right).map(|(l, r)| l + r).collect(),
        );

        profiler!(stop "sum");
        let fri_combination_codeword = if fri_domain_is_short_domain {
            deep_codeword
        } else {
            profiler!(start "LDE" ("LDE"));
            let deep_codeword = domains
                .quotient
                .low_degree_extension(&deep_codeword, domains.fri);
            profiler!(stop "LDE");
            deep_codeword
        };
        assert_eq!(domains.fri.length, fri_combination_codeword.len());
        profiler!(stop "combined DEEP polynomial");

        profiler!(start "FRI");
        let revealed_current_row_indices =
            fri.prove(&fri_combination_codeword, &mut proof_stream)?;
        assert_eq!(
            self.parameters.num_collinearity_checks,
            revealed_current_row_indices.len()
        );
        profiler!(stop "FRI");

        profiler!(start "open trace leafs");
        // Open leafs of zipped codewords at indicated positions
        let main_row_err = |row: Vec<_>| ProvingError::TableRowConversionError {
            expected_len: MasterMainTable::NUM_COLUMNS,
            actual_len: row.len(),
        };
        let revealed_main_elems = master_main_table
            .reveal_rows(&revealed_current_row_indices)
            .into_iter()
            .map(|row| row.try_into().map_err(main_row_err))
            .try_collect()?;
        let base_authentication_structure =
            main_merkle_tree.authentication_structure(&revealed_current_row_indices)?;
        proof_stream.enqueue(ProofItem::MasterMainTableRows(revealed_main_elems));
        proof_stream.enqueue(ProofItem::AuthenticationStructure(
            base_authentication_structure,
        ));

        let aux_row_err = |row: Vec<_>| ProvingError::TableRowConversionError {
            expected_len: MasterAuxTable::NUM_COLUMNS,
            actual_len: row.len(),
        };
        let revealed_aux_elems = master_aux_table
            .reveal_rows(&revealed_current_row_indices)
            .into_iter()
            .map(|row| row.try_into().map_err(aux_row_err))
            .try_collect()?;
        let aux_authentication_structure =
            aux_merkle_tree.authentication_structure(&revealed_current_row_indices)?;
        proof_stream.enqueue(ProofItem::MasterAuxTableRows(revealed_aux_elems));
        proof_stream.enqueue(ProofItem::AuthenticationStructure(
            aux_authentication_structure,
        ));

        // Open quotient & combination codewords at the same positions as
        // main & aux codewords.
        let into_fixed_width_row =
            |row: ArrayView1<_>| -> QuotientSegments { row.to_vec().try_into().unwrap() };
        let revealed_quotient_segments_rows = revealed_current_row_indices
            .iter()
            .map(|&i| fri_domain_quotient_segment_codewords.row(i))
            .map(into_fixed_width_row)
            .collect_vec();
        let revealed_quotient_authentication_structure =
            quot_merkle_tree.authentication_structure(&revealed_current_row_indices)?;
        proof_stream.enqueue(ProofItem::QuotientSegmentsElements(
            revealed_quotient_segments_rows,
        ));
        proof_stream.enqueue(ProofItem::AuthenticationStructure(
            revealed_quotient_authentication_structure,
        ));
        profiler!(stop "open trace leafs");

        Ok(proof_stream.into())
    }
554
    /// Compute the quotient segment codewords over the FRI domain together
    /// with the corresponding quotient segment polynomials.
    ///
    /// Uses the cached quotient-domain tables when both the main and the
    /// auxiliary table provide one; otherwise, clears any partial cache and
    /// falls back to the memory-friendly just-in-time low-degree extension,
    /// [`Self::compute_quotient_segments_with_jit_lde`].
    fn compute_quotient_segments(
        main_table: &mut MasterMainTable,
        aux_table: &mut MasterAuxTable,
        quotient_domain: ArithmeticDomain,
        challenges: &Challenges,
        quotient_combination_weights: &[XFieldElement],
    ) -> (
        Array2<XFieldElement>,
        Array1<Polynomial<'static, XFieldElement>>,
    ) {
        let (Some(main_quotient_domain_codewords), Some(aux_quotient_domain_codewords)) = (
            main_table.quotient_domain_table(),
            aux_table.quotient_domain_table(),
        ) else {
            // The decision to cache the quotient domain main table can be
            // independent of the decision to cache the quotient domain
            // auxiliary table. Additionally, available memory is affected by
            // other running programs. Together, this may result in one table
            // being cached while the other is not. To compute the quotients,
            // either both or neither are needed.[^1] For peak memory
            // consumption, it is beneficial to clear any unused cache.
            //
            // Discarding the cache incurs a performance penalty later, when
            // revealing the rows indicated by FRI. This is an acceptable
            // tradeoff: Executing this very code path means that in the current
            // environment, peak memory usage is a concern. Running out of
            // memory results in abnormal termination of the prover. Slower
            // execution is an acceptable price for normal termination.
            //
            // [^1]: Code using exactly one cache _could_ exist, but oh! the
            //       engineering.
            main_table.clear_cache();
            aux_table.clear_cache();

            profiler!(start "quotient calculation (just-in-time)");
            let (fri_domain_quotient_segment_codewords, quotient_segment_polynomials) =
                Self::compute_quotient_segments_with_jit_lde(
                    main_table,
                    aux_table,
                    challenges,
                    quotient_combination_weights,
                );
            profiler!(stop "quotient calculation (just-in-time)");

            return (
                fri_domain_quotient_segment_codewords,
                quotient_segment_polynomials,
            );
        };

        // Cached path: all quotients are combined into a single codeword over
        // the quotient domain using the Fiat-Shamir-sampled weights.
        profiler!(start "quotient calculation (cached)" ("CC"));
        let quotient_codeword = all_quotients_combined(
            main_quotient_domain_codewords,
            aux_quotient_domain_codewords,
            main_table.domains().trace,
            quotient_domain,
            challenges,
            quotient_combination_weights,
        );
        let quotient_codeword = Array1::from(quotient_codeword);
        assert_eq!(quotient_domain.length, quotient_codeword.len());
        profiler!(stop "quotient calculation (cached)");

        // Split the combined quotient into segments, then evaluate each
        // segment polynomial over the FRI domain.
        profiler!(start "quotient LDE" ("LDE"));
        let quotient_segment_polynomials =
            Self::interpolate_quotient_segments(quotient_codeword, quotient_domain);

        let fri_domain = main_table.domains().fri;
        let fri_domain_quotient_segment_codewords =
            Self::fri_domain_segment_polynomials(quotient_segment_polynomials.view(), fri_domain);
        profiler!(stop "quotient LDE");

        (
            fri_domain_quotient_segment_codewords,
            quotient_segment_polynomials,
        )
    }
632
633    /// Computes the quotient segments in a memory-friendly way, i.e., without
634    /// ever representing the entire low-degree extended trace. Instead, the
635    /// trace is extrapolated over cosets of the trace domain, and the
636    /// quotients are computed there. The resulting coset-quotients are
637    /// linearly recombined to produce the quotient segment codewords.
638    fn compute_quotient_segments_with_jit_lde(
639        main_table: &mut MasterMainTable,
640        aux_table: &mut MasterAuxTable,
641        challenges: &Challenges,
642        quotient_combination_weights: &[XFieldElement],
643    ) -> (
644        Array2<XFieldElement>,
645        Array1<Polynomial<'static, XFieldElement>>,
646    ) {
647        // This parameter regulates a time-memory tradeoff. Semantically, it is
648        // the ratio of the randomized trace length to the length of the domain
649        // used for calculating the quotient segments (aka “working domain”).
650        // When this factor is larger, there are _more_ cosets of _smaller_ size
651        // over which the trace polynomials are evaluated.
652        // Must be a power of two and lie in 2..=randomized_trace_domain.length.
653        //
654        // The requirement for the working domain to be at most as long as the
655        // trace domain, i.e., at most half the size of the randomized trace
656        // domain, is explained below.
657        const RANDOMIZED_TRACE_LEN_TO_WORKING_DOMAIN_LEN_RATIO: usize = 2;
658        const NUM_COSETS: usize =
659            NUM_QUOTIENT_SEGMENTS * RANDOMIZED_TRACE_LEN_TO_WORKING_DOMAIN_LEN_RATIO;
660
661        debug_assert!(RANDOMIZED_TRACE_LEN_TO_WORKING_DOMAIN_LEN_RATIO.is_power_of_two());
662
663        let domains = main_table.domains();
664        let mut working_domain = domains.randomized_trace;
665        for _ in 0..RANDOMIZED_TRACE_LEN_TO_WORKING_DOMAIN_LEN_RATIO.ilog2() {
666            working_domain = working_domain.halve().unwrap();
667        }
668        let working_domain = working_domain;
669
670        let num_rows = working_domain.length;
671        let coset_root_order = (num_rows * NUM_COSETS).try_into().unwrap();
672
673        // the powers of ι define `NUM_COSETS`-many cosets of the working domain
674        let iota = BFieldElement::primitive_root_of_unity(coset_root_order).unwrap();
675        let psi = domains.fri.offset;
676
677        // for every coset, evaluate constraints
678        profiler!(start "zero-initialization");
679        // column majority (“`F`”) for contiguous column slices
680        let mut quotient_multicoset_evaluations =
681            ndarray_helper::par_zeros((num_rows, NUM_COSETS).f());
682        let mut main_columns =
683            ndarray_helper::par_zeros((num_rows, MasterMainTable::NUM_COLUMNS).f());
684        let mut aux_columns =
685            ndarray_helper::par_zeros((num_rows, MasterAuxTable::NUM_COLUMNS).f());
686        profiler!(stop "zero-initialization");
687
688        profiler!(start "fetch trace randomizers");
689        let main_trace_randomizers = (0..MasterMainTable::NUM_COLUMNS)
690            .into_par_iter()
691            .map(|i| main_table.trace_randomizer_for_column(i))
692            .collect::<Vec<_>>();
693        let aux_trace_randomizers = (0..MasterAuxTable::NUM_COLUMNS)
694            .into_par_iter()
695            .map(|i| aux_table.trace_randomizer_for_column(i))
696            .collect::<Vec<_>>();
697        let main_trace_randomizers = Array1::from(main_trace_randomizers);
698        let aux_trace_randomizers = Array1::from(aux_trace_randomizers);
699        profiler!(stop "fetch trace randomizers");
700
701        profiler!(start "poly interpolate" ("LDE"));
702        main_table
703            .trace_table_mut()
704            .axis_iter_mut(COL_AXIS)
705            .into_par_iter()
706            .for_each(|mut column| intt(column.as_slice_mut().unwrap()));
707        aux_table
708            .trace_table_mut()
709            .axis_iter_mut(COL_AXIS)
710            .into_par_iter()
711            .for_each(|mut column| intt(column.as_slice_mut().unwrap()));
712        profiler!(stop "poly interpolate");
713
714        profiler!(start "calculate quotients");
715        for (coset_index, quotient_column) in
716            (0..).zip(quotient_multicoset_evaluations.columns_mut())
717        {
718            // offset by fri domain offset to avoid division-by-zero errors
719            let working_domain = working_domain.with_offset(iota.mod_pow(coset_index) * psi);
720            profiler!(start "poly evaluate" ("LDE"));
721            Zip::from(main_table.trace_table().axis_iter(COL_AXIS))
722                .and(main_columns.axis_iter_mut(COL_AXIS))
723                .par_for_each(|trace_column, target_column| {
724                    let trace_poly = Polynomial::new_borrowed(trace_column.as_slice().unwrap());
725                    Array1::from(working_domain.evaluate(&trace_poly)).move_into(target_column);
726                });
727            Zip::from(aux_table.trace_table().axis_iter(COL_AXIS))
728                .and(aux_columns.axis_iter_mut(COL_AXIS))
729                .par_for_each(|trace_column, target_column| {
730                    let trace_poly = Polynomial::new_borrowed(trace_column.as_slice().unwrap());
731                    Array1::from(working_domain.evaluate(&trace_poly)).move_into(target_column);
732                });
733            profiler!(stop "poly evaluate");
734
735            // A _randomized_ trace interpolant is:
736            //
737            //    trace_interpolant + trace_zerofier·trace_randomizer
738            //    ╶───────┬───────╴   ╶──────────────┬──────────────╴
739            //            ╵                          │
740            //   was just moved into                 ╵
741            //  `{main, aux}_columns`          still missing
742            //
743            //
744            // Knowing the shape of the trace zerofier (see also
745            // `domain.zerofier()`), and with the length of the trace domain
746            // being `n`, this is:
747            //
748            //  trace_zerofier·trace_randomizer = (X^n - 1)·trace_randomizer
749            //
750            // For reasons of efficiency, all three components (interpolant,
751            // zerofier, and randomizer) are evaluated over the
752            // `working_domain`, i.e., they are codewords. The constant
753            // `RANDOMIZED_TRACE_LEN_TO_WORKING_DOMAIN_LEN_RATIO` defines the
754            // length of the working domain in relation to the randomized trace
755            // domain. Let the length of the working domain `m`, the generator
756            // of the working domain a primitive mth root of unity ξ, and the
757            // working domain's offset γ.
758            //
759            // If the length of the working domain `m` is less than or equal the
760            // length of the trace domain, i.e., if m <= n or equivalently, if
761            // constant `RANDOMIZED_TRACE_LEN_TO_WORKING_DOMAIN_LEN_RATIO` >= 2,
762            // then evaluating the trace zerofier (X^n - 1) on the `i`th working
763            // domain value gives:
764            //
765            //   (X^n - 1)(ξ^i·γ) = (ξ^i·γ)^n - 1 = 1^i · γ^n - 1 = γ^n - 1
766            //
767            // In other words, the trace_zerofier codeword over the working
768            // domain is [working_domain_offset^n - 1; m].
769            //
770            // Should a future re-design want to consider a working domain of
771            // length equal to the randomized trace domain, or in other words,
772            // should it consider a
773            // `RANDOMIZED_TRACE_LEN_TO_WORKING_DOMAIN_LEN_RATIO` of 1, then the
774            // trace_zerofier's contribution below needs to be generalized. On
775            // working_domain's value `i`, the zerofier contribution would then
776            // be (-1)^i·γ^n - 1. In particular, note the term (-1)^i, which is
777            // absent from the trace randomizer when evaluated on working
778            // domains at most as long as the trace domain.
779            assert!(working_domain.length <= domains.trace.length);
780
781            profiler!(start "trace randomizers" ("LDE"));
782            let trace_domain_len = u64::try_from(domains.trace.length).unwrap();
783            let zerofier = working_domain.offset.mod_pow(trace_domain_len) - BFieldElement::ONE;
784
785            Zip::from(main_columns.axis_iter_mut(COL_AXIS))
786                .and(main_trace_randomizers.axis_iter(ROW_AXIS))
787                .par_for_each(|mut column, randomizer_polynomial| {
788                    let randomizer_codeword = working_domain.evaluate(&randomizer_polynomial[[]]);
789                    for (cell, randomizer) in column.iter_mut().zip(randomizer_codeword) {
790                        *cell += zerofier * randomizer;
791                    }
792                });
793            Zip::from(aux_columns.axis_iter_mut(COL_AXIS))
794                .and(aux_trace_randomizers.axis_iter(ROW_AXIS))
795                .par_for_each(|mut column, randomizer_polynomial| {
796                    let randomizer_codeword = working_domain.evaluate(&randomizer_polynomial[[]]);
797                    for (cell, randomizer) in column.iter_mut().zip(randomizer_codeword) {
798                        *cell += zerofier * randomizer;
799                    }
800                });
801            profiler!(stop "trace randomizers");
802
803            profiler!(start "AIR evaluation" ("AIR"));
804            let all_quotients = all_quotients_combined(
805                main_columns.view(),
806                aux_columns.view(),
807                domains.trace,
808                working_domain,
809                challenges,
810                quotient_combination_weights,
811            );
812            Array1::from(all_quotients).move_into(quotient_column);
813            profiler!(stop "AIR evaluation");
814        }
815        profiler!(stop "calculate quotients");
816
817        profiler!(start "segmentify");
818        let segmentification = Self::segmentify::<NUM_QUOTIENT_SEGMENTS>(
819            quotient_multicoset_evaluations,
820            psi,
821            iota,
822            domains.fri,
823        );
824        profiler!(stop "segmentify");
825
826        profiler!(start "restore original trace" ("LDE"));
827        main_table
828            .trace_table_mut()
829            .axis_iter_mut(COL_AXIS)
830            .into_par_iter()
831            .for_each(|mut column| ntt(column.as_slice_mut().unwrap()));
832        aux_table
833            .trace_table_mut()
834            .axis_iter_mut(COL_AXIS)
835            .into_par_iter()
836            .for_each(|mut column| ntt(column.as_slice_mut().unwrap()));
837        profiler!(stop "restore original trace");
838
839        segmentification
840    }
841
842    /// Map a matrix whose columns represent the evaluation of a high-degree
843    /// polynomial f on all constituents of a partition of some large domain
844    /// into smaller cosets, to
845    /// 1. a matrix of segment codewords (on the FRI domain), and
846    /// 2. an array of matching segment polynomials,
847    ///
848    /// such that the segment polynomials correspond to the interleaving split
849    /// of the original high-degree polynomial.
850    ///
851    /// For example, let f(X) have degree M·N where N is the chosen domain's
852    /// length. Then the input is an N×M matrix representing the values of
853    /// f(X) on the chosen domain and its cosets:
854    ///
855    /// ```txt
856    /// ⎛  …          …   …           ⎞  ┬
857    /// ⎜ f(coset_0)  …  f(coset_M-1) ⎟ domain length
858    /// ⎝  …          …   …           ⎠  ┴
859    ///
860    /// ├───────── NUM_COSETS ────────┤
861    /// ```
862    ///
    /// The `NUM_SEGMENTS` (=:`K`) produced segment polynomials are f_k(X) such
    /// that f(X) = Σ_k X^k · f_k(X^K).
865    /// For example, for `K = 2`, this is f(X) = f_E(X²) + X·f_O(X²).
866    ///
867    /// The produced segment codewords are the segment polynomial's evaluations
868    /// on the FRI domain:
869    ///
870    /// ```txt
871    /// ⎛  …            …   …             ⎞  ┬
872    /// ⎜ f_0(FRI_dom)  …  f_K-1(FRI_dom) ⎟ FRI domain length
873    /// ⎝  …            …   …             ⎠  ┴
874    ///
875    /// ├────────── NUM_SEGMENTS ─────────┤
876    /// ```
877    //
878    // The mechanics of this function are backed by some serious maths
879    // originally derived by Alan Szepieniec, and later generalized by him and
880    // Jan Ferdinand Sauer.
881    //
882    // The main idea is based on the segmentation formula. For K segments, this
883    // is
884    //
885    // f(X) = Σ_{k=0}^{K-1} X^k · f_k(X^K)
886    //
887    // where each f_k is one segment. When substituting X for X·ξ^l, where ξ is
888    // a Kth root of unity (i.e., ξ^K = 1), this gives rise to K equations,
889    // where l ∈ { 0, …, K-1 }:
890    //
891    // f(X·ξ^0)     = Σ_{k=0}^{K-1} (X·ξ^0)^k     · f_k(X^K)
892    //              ⋮
893    // f(X·ξ^(K-1)) = Σ_{k=0}^{K-1} (X·ξ^(K-1))^k · f_k(X^K)
894    //
895    // Note how the indeterminates of the f_k are identical for all rows. That
896    // is, the mapping between f's evaluations on (the “right”) cosets and f's
897    // segments is a linear one.
898    //
899    // ⎛  …       ⎞       ⎛     …               ⎞   ⎛  …       ⎞
900    // ⎜ f(X·ξ^l) ⎟   =   ⎜ …  X^k · ξ^(k·l)  … ⎟ · ⎜ f_k(X^K) ⎟             (1)
901    // ⎝  …       ⎠       ⎝     …               ⎠   ⎝  …       ⎠
902    //
903    // This function works by applying the inverse of the coefficient matrix to
904    // the function's input, i.e., to the left hand side of above equation. To
905    // compute this map efficiently, it is decomposed as follows. Operator “∘”
906    // denotes the Hadamard, i.e., element-wise product.
907    //
908    //                    ⎛     …         ⎞   ⎛ ⎛  …  ⎞   ⎛  …       ⎞ ⎞
909    //                =   ⎜ …  ξ^(k·l)  … ⎟ · ⎜ ⎜ X^k ⎟ ∘ ⎜ f_k(X^K) ⎟ ⎟
910    //                    ⎝     …         ⎠   ⎝ ⎝  …  ⎠   ⎝  …       ⎠ ⎠
911    //
912    // The coefficient matrix has dimensions K×K. Since ξ is a Kth root of
913    // unity, above matrix is an NTT matrix. That means its application can be
914    // efficiently reverted by performing iNTTs.
915    // The final step is elementwise multiplication with the vector (X^(-k)) to
916    // get the segment polynomials.
917    //
918    // For reasons of efficiency, this function does not operate on polynomials
919    // in monomial coefficient form, but on polynomial evaluations on some
920    // domain, i.e., codewords.
921    // Also for reasons of efficiency, the domain length N is a power of two,
922    // and the evaluation points are multiples of an Nth root of unity, ω. In
923    // order to avoid divisions by zero, the domain is offset by Ψ. Furthermore,
924    // the offset of a coset is some power of ι, which itself is a root of unity
925    // of order N·M, where M is the number of cosets. That is, ι^M = ω,
926    // and ω^N = 1. Summarizing, this function's input is a matrix of the
927    // following form:
928    //
929    // ⎛     …                  ⎞╷ ┬       ⎛     …                   ⎞
930    // ⎜ …  f(Ψ · ι^j · ω^i)  … ⎟i N   =   ⎜ …  f(Ψ · ι^(j + iM))  … ⎟
931    // ⎝     …                  ⎠↓ ┴       ⎝     …                   ⎠
932    // ╶─────────── j ─────────→
933    // ├─────────── M ──────────┤
934    //
935    // In order to kick off the series of steps derived & outlined above, this
936    // matrix needs to be rearranged. The desired shape can be derived by taking
937    // the left-hand side of the system of equations (1) and substituting the
938    // indeterminate X for the points at which f is evaluated, Ψ · ι^j · ω^i.
939    // Let L such that N·M = L·K. Observe that ξ being a Kth root of unity
940    // implies ξ = ω^(N/K) = ι^(N·M/K) = ι^L.
941    //
942    // ⎛  …       ⎞       ⎛     …                          ⎞ ┬
    // ⎜ f(X·ξ^l) ⎟   ↦   ⎜ …  f(Ψ · ι^(j + i·M + l·L))  … ⎟ L
944    // ⎝  …       ⎠       ⎝     …                          ⎠ ┴
945    //
946    //                    ├────────────── K ──────────────┤
947    //
948    // Helpful insights to understand the matrix re-arrangement are:
949    // - the various powers of ι, i.e., { ι^(j + i·M) | 0 ⩽ i < N, 0 ⩽ j < M },
950    //   sweep the entire input matrix (which has dimensions N×M)
951    // - ι is a (primitive) (N·M)th root of unity and thus, _all_ powers of ι
952    //   are required to index the entirety of the input matrix
953    // - the re-arranged matrix (which has dimensions L×K) has all the same
954    //   entries as the input matrix
955    //
956    // The map that satisfies all of these re-arrangement constraints is
957    // (i, j) ↦ ((j + i·M) % L, (j + i·M) // L)
958    // which has the inverse
959    // (a, b) ↦ ((a + b·L) // M, (a + b·L) % M).
960    //
961    // With all those pieces in place, it's finally time for some code.
    fn segmentify<const NUM_SEGMENTS: usize>(
        quotient_multicoset_evaluations: Array2<XFieldElement>,
        psi: BFieldElement,
        iota: BFieldElement,
        fri_domain: ArithmeticDomain,
    ) -> (
        Array2<XFieldElement>,
        Array1<Polynomial<'static, XFieldElement>>,
    ) {
        // In the notation of the derivation above: N = num_input_rows,
        // M = num_cosets, K = NUM_SEGMENTS, and L = num_output_rows = N·M/K.
        let num_input_rows = quotient_multicoset_evaluations.nrows();
        let num_cosets = quotient_multicoset_evaluations.ncols();
        let num_output_rows = num_input_rows * num_cosets / NUM_SEGMENTS;

        // All relevant lengths must be powers of two for the domains to exist
        // and for the (i)NTTs below to apply.
        assert!(num_input_rows.is_power_of_two());
        assert!(num_cosets.is_power_of_two());
        assert!(num_output_rows.is_power_of_two());
        assert!(NUM_SEGMENTS.is_power_of_two());
        assert!(
            num_input_rows >= num_cosets,
            "working domain length: {num_input_rows} versus num cosets: {num_cosets}",
        );
        assert!(
            num_cosets >= NUM_SEGMENTS,
            "num cosets: {num_cosets} versus num segments: {NUM_SEGMENTS}",
        );

        // Re-arrange data in preparation for iNTT:
        // Move appropriate powers of ξ^(k·l) with the same k into the same row.
        // Change the matrix's dimensions from N×M to L×K, with row majority
        // (“`C`”) to get contiguous row slices for iNTT.
        let mut quotient_segments = ndarray_helper::par_zeros((num_output_rows, NUM_SEGMENTS));
        quotient_segments
            .axis_iter_mut(ROW_AXIS)
            .into_par_iter()
            .enumerate()
            .for_each(|(output_row_idx, mut output_row)| {
                for (output_col_idx, cell) in output_row.iter_mut().enumerate() {
                    // With (a, b) := (output_row_idx, output_col_idx), this is
                    // the inverse map (a, b) ↦ ((a + b·L) / M, (a + b·L) % M)
                    // derived above, locating the source cell in the N×M input.
                    let exponent_of_iota = output_row_idx + output_col_idx * num_output_rows;
                    let input_row_idx = exponent_of_iota / num_cosets;
                    let input_col_idx = exponent_of_iota % num_cosets;
                    *cell = quotient_multicoset_evaluations[[input_row_idx, input_col_idx]];
                }
            });

        // apply inverse of Vandermonde matrix for ξ = ι^L to every row
        quotient_segments
            .axis_iter_mut(ROW_AXIS)
            .into_par_iter()
            .for_each(|mut row| intt(row.as_slice_mut().unwrap()));

        // scale every row by Ψ^-k · ι^(-k(j+i·M))
        //
        // Manual chunking lets each thread compute its chunk's starting power
        // of ι⁻¹ with a single `mod_pow`; within the chunk, each subsequent
        // row's scaling factor is obtained with one extra multiplication.
        let num_threads = rayon::current_num_threads().max(1);
        let chunk_size = (num_output_rows / num_threads).max(1);
        let iota_inverse = iota.inverse();
        let psi_inverse = psi.inverse();
        quotient_segments
            .axis_chunks_iter_mut(ROW_AXIS, chunk_size)
            .into_par_iter()
            .enumerate()
            .for_each(|(chunk_idx, mut chunk_of_rows)| {
                let chunk_start = (chunk_idx * chunk_size).try_into().unwrap();
                let mut psi_iotajim_inv = psi_inverse * iota_inverse.mod_pow(chunk_start);
                for mut row in chunk_of_rows.rows_mut() {
                    // The k-th cell of a row is scaled by the k-th power of
                    // Ψ⁻¹ · ι^(-(j+i·M)), accumulated one multiplication at
                    // a time.
                    let mut psi_iotajim_invk = XFieldElement::ONE;
                    for cell in &mut row {
                        *cell *= psi_iotajim_invk;
                        psi_iotajim_invk *= psi_iotajim_inv;
                    }
                    psi_iotajim_inv *= iota_inverse;
                }
            });

        // low-degree extend columns from trace to FRI domain
        //
        // The segment polynomials f_k are evaluated at Kth powers of the
        // original points (they are polynomials “in X^K”); hence the segment
        // domain's offset is Ψ^K.
        let segment_domain_offset = psi.mod_pow(NUM_SEGMENTS.try_into().unwrap());
        let segment_domain = ArithmeticDomain::of_length(num_output_rows)
            .unwrap()
            .with_offset(segment_domain_offset);

        let mut quotient_codewords = Array2::zeros([fri_domain.length, NUM_SEGMENTS]);
        let mut quotient_polynomials = Array1::zeros([NUM_SEGMENTS]);
        Zip::from(quotient_segments.axis_iter(COL_AXIS))
            .and(quotient_codewords.axis_iter_mut(COL_AXIS))
            .and(quotient_polynomials.axis_iter_mut(ROW_AXIS))
            .par_for_each(|segment, target_codeword, target_polynomial| {
                // Getting a column as a contiguous piece of memory from a
                // row-majority matrix requires `.to_vec()`.
                let interpolant = segment_domain.interpolate(&segment.to_vec());
                let lde_codeword = fri_domain.evaluate(&interpolant);
                Array1::from(lde_codeword).move_into(target_codeword);
                Array0::from_elem((), interpolant).move_into(target_polynomial);
            });

        (quotient_codewords, quotient_polynomials)
    }
1056
1057    fn interpolate_quotient_segments(
1058        quotient_codeword: Array1<XFieldElement>,
1059        quotient_domain: ArithmeticDomain,
1060    ) -> Array1<Polynomial<'static, XFieldElement>> {
1061        let quotient_interpolation_poly = quotient_domain.interpolate(&quotient_codeword.to_vec());
1062        let quotient_segments: [_; NUM_QUOTIENT_SEGMENTS] =
1063            Self::split_polynomial_into_segments(quotient_interpolation_poly);
1064        Array1::from(quotient_segments.to_vec())
1065    }
1066
1067    /// Losslessly split the given polynomial `f` into `N` segments of (roughly)
1068    /// equal degree. The degree of each segment is at most `f.degree() /
1069    /// N`. It holds that `f(x) = Σ_{i=0}^{N-1} x^i·f_i(x^N)`, where the
1070    /// `f_i` are the segments.
1071    ///
1072    /// For example, let
1073    /// - `N = 3`, and
1074    /// - `f(x) = 7·x^7 + 6·x^6 + 5·x^5 + 4·x^4 + 3·x^3 + 2·x^2 + 1·x + 0`.
1075    ///
1076    /// Then, the function returns the array:
1077    ///
1078    /// ```text
1079    /// [f_0(x) = 6·x^2 + 3·x + 0,
1080    ///  f_1(x) = 7·x^2 + 4·x + 1,
1081    ///  f_2(x) =         5·x + 2]
1082    /// ```
1083    ///
1084    /// The following equality holds: `f(x) == f_0(x^3) + x·f_1(x^3) +
1085    /// x^2·f_2(x^3)`.
1086    fn split_polynomial_into_segments<const N: usize, FF: FiniteField>(
1087        polynomial: Polynomial<FF>,
1088    ) -> [Polynomial<'static, FF>; N] {
1089        let mut segments = Vec::with_capacity(N);
1090        let coefficients = polynomial.into_coefficients();
1091        for segment_index in 0..N {
1092            let segment_coefficients = coefficients.iter().skip(segment_index).step_by(N);
1093            let segment = Polynomial::new(segment_coefficients.copied().collect());
1094            segments.push(segment);
1095        }
1096        segments.try_into().unwrap()
1097    }
1098
1099    fn fri_domain_segment_polynomials(
1100        quotient_segment_polynomials: ArrayView1<Polynomial<XFieldElement>>,
1101        fri_domain: ArithmeticDomain,
1102    ) -> Array2<XFieldElement> {
1103        let fri_domain_codewords: Vec<_> = quotient_segment_polynomials
1104            .into_par_iter()
1105            .flat_map(|segment| fri_domain.evaluate(segment))
1106            .collect();
1107        Array2::from_shape_vec(
1108            [fri_domain.length, NUM_QUOTIENT_SEGMENTS].f(),
1109            fri_domain_codewords,
1110        )
1111        .unwrap()
1112    }
1113
1114    /// Apply the [DEEP update](Stark::deep_update) to a polynomial in value
1115    /// form, _i.e._, to a codeword.
1116    fn deep_codeword(
1117        codeword: &[XFieldElement],
1118        domain: ArithmeticDomain,
1119        out_of_domain_point: XFieldElement,
1120        out_of_domain_value: XFieldElement,
1121    ) -> Vec<XFieldElement> {
1122        domain
1123            .domain_values()
1124            .par_iter()
1125            .zip_eq(codeword)
1126            .map(|(&in_domain_value, &in_domain_evaluation)| {
1127                Stark::deep_update(
1128                    in_domain_value,
1129                    in_domain_evaluation,
1130                    out_of_domain_point,
1131                    out_of_domain_value,
1132                )
1133            })
1134            .collect()
1135    }
1136}
1137
1138impl Verifier {
1139    pub fn new(parameters: Stark) -> Self {
1140        Self { parameters }
1141    }
1142
1143    /// See also [`Stark::verify`].
1144    pub fn verify(self, claim: &Claim, proof: &Proof) -> Result<(), VerificationError> {
1145        profiler!(start "deserialize");
1146        let mut proof_stream = ProofStream::try_from(proof)?;
1147        profiler!(stop "deserialize");
1148
1149        profiler!(start "Fiat-Shamir: Claim" ("hash"));
1150        proof_stream.alter_fiat_shamir_state_with(claim);
1151        profiler!(stop "Fiat-Shamir: Claim");
1152
1153        profiler!(start "derive additional parameters");
1154        let log_2_padded_height = proof_stream.dequeue()?.try_into_log2_padded_height()?;
1155        let padded_height = 1 << log_2_padded_height;
1156        let fri = self.parameters.fri(padded_height)?;
1157        let merkle_tree_height = fri.domain.length.ilog2();
1158
1159        // The trace domain used by the prover is not necessarily of length
1160        // `padded_height`. Concretely, this is the case if the number of trace
1161        // randomizers is larger than `padded_height`. However, the trace domain
1162        // length is guaranteed to be _exactly_ half the size of the randomized
1163        // trace domain's length, due to the current approach for randomizing
1164        // the trace.
1165        let trace_domain_len =
1166            Stark::randomized_trace_len(padded_height, self.parameters.num_trace_randomizers) / 2;
1167        profiler!(stop "derive additional parameters");
1168
1169        profiler!(start "Fiat-Shamir 1" ("hash"));
1170        let main_merkle_tree_root = proof_stream.dequeue()?.try_into_merkle_root()?;
1171        let extension_challenge_weights = proof_stream.sample_scalars(Challenges::SAMPLE_COUNT);
1172        let challenges = Challenges::new(extension_challenge_weights, claim);
1173        let auxiliary_tree_merkle_root = proof_stream.dequeue()?.try_into_merkle_root()?;
1174
1175        // Sample weights for quotient codeword, which is a part of the
1176        // combination codeword. See corresponding part in the prover for a more
1177        // detailed explanation.
1178        let quot_codeword_weights = proof_stream.sample_scalars(MasterAuxTable::NUM_CONSTRAINTS);
1179        let quot_codeword_weights = Array1::from(quot_codeword_weights);
1180        let quotient_codeword_merkle_root = proof_stream.dequeue()?.try_into_merkle_root()?;
1181        profiler!(stop "Fiat-Shamir 1");
1182
1183        profiler!(start "dequeue ood point and rows" ("hash"));
1184        let trace_domain_generator =
1185            ArithmeticDomain::generator_for_length(trace_domain_len as u64)?;
1186        let out_of_domain_point_curr_row = proof_stream.sample_scalars(1)[0];
1187        let out_of_domain_point_next_row = trace_domain_generator * out_of_domain_point_curr_row;
1188        let out_of_domain_point_curr_row_pow_num_segments =
1189            out_of_domain_point_curr_row.mod_pow_u32(NUM_QUOTIENT_SEGMENTS as u32);
1190
1191        let out_of_domain_curr_main_row =
1192            proof_stream.dequeue()?.try_into_out_of_domain_main_row()?;
1193        let out_of_domain_curr_aux_row =
1194            proof_stream.dequeue()?.try_into_out_of_domain_aux_row()?;
1195        let out_of_domain_next_main_row =
1196            proof_stream.dequeue()?.try_into_out_of_domain_main_row()?;
1197        let out_of_domain_next_aux_row =
1198            proof_stream.dequeue()?.try_into_out_of_domain_aux_row()?;
1199        let out_of_domain_curr_row_quot_segments = proof_stream
1200            .dequeue()?
1201            .try_into_out_of_domain_quot_segments()?;
1202
1203        let out_of_domain_curr_main_row = Array1::from(out_of_domain_curr_main_row.to_vec());
1204        let out_of_domain_curr_aux_row = Array1::from(out_of_domain_curr_aux_row.to_vec());
1205        let out_of_domain_next_main_row = Array1::from(out_of_domain_next_main_row.to_vec());
1206        let out_of_domain_next_aux_row = Array1::from(out_of_domain_next_aux_row.to_vec());
1207        let out_of_domain_curr_row_quot_segments =
1208            Array1::from(out_of_domain_curr_row_quot_segments.to_vec());
1209        profiler!(stop "dequeue ood point and rows");
1210
1211        profiler!(start "out-of-domain quotient element");
1212        profiler!(start "evaluate AIR" ("AIR"));
1213        let evaluated_initial_constraints = MasterAuxTable::evaluate_initial_constraints(
1214            out_of_domain_curr_main_row.view(),
1215            out_of_domain_curr_aux_row.view(),
1216            &challenges,
1217        );
1218        let evaluated_consistency_constraints = MasterAuxTable::evaluate_consistency_constraints(
1219            out_of_domain_curr_main_row.view(),
1220            out_of_domain_curr_aux_row.view(),
1221            &challenges,
1222        );
1223        let evaluated_transition_constraints = MasterAuxTable::evaluate_transition_constraints(
1224            out_of_domain_curr_main_row.view(),
1225            out_of_domain_curr_aux_row.view(),
1226            out_of_domain_next_main_row.view(),
1227            out_of_domain_next_aux_row.view(),
1228            &challenges,
1229        );
1230        let evaluated_terminal_constraints = MasterAuxTable::evaluate_terminal_constraints(
1231            out_of_domain_curr_main_row.view(),
1232            out_of_domain_curr_aux_row.view(),
1233            &challenges,
1234        );
1235        profiler!(stop "evaluate AIR");
1236
1237        profiler!(start "zerofiers");
1238        let initial_zerofier_inv = (out_of_domain_point_curr_row - bfe!(1)).inverse();
1239        let consistency_zerofier_inv =
1240            (out_of_domain_point_curr_row.mod_pow_u64(trace_domain_len as u64) - bfe!(1)).inverse();
1241        let except_last_row = out_of_domain_point_curr_row - trace_domain_generator.inverse();
1242        let transition_zerofier_inv = except_last_row * consistency_zerofier_inv;
1243        let terminal_zerofier_inv = except_last_row.inverse(); // i.e., only last row
1244        profiler!(stop "zerofiers");
1245
1246        profiler!(start "divide");
1247        let divide = |constraints: Vec<_>, z_inv| constraints.into_iter().map(move |c| c * z_inv);
1248        let initial_quotients = divide(evaluated_initial_constraints, initial_zerofier_inv);
1249        let consistency_quotients =
1250            divide(evaluated_consistency_constraints, consistency_zerofier_inv);
1251        let transition_quotients =
1252            divide(evaluated_transition_constraints, transition_zerofier_inv);
1253        let terminal_quotients = divide(evaluated_terminal_constraints, terminal_zerofier_inv);
1254
1255        let quotient_summands = initial_quotients
1256            .chain(consistency_quotients)
1257            .chain(transition_quotients)
1258            .chain(terminal_quotients)
1259            .collect_vec();
1260        profiler!(stop "divide");
1261
1262        profiler!(start "inner product" ("CC"));
1263        let out_of_domain_quotient_value =
1264            quot_codeword_weights.dot(&Array1::from(quotient_summands));
1265        profiler!(stop "inner product");
1266        profiler!(stop "out-of-domain quotient element");
1267
1268        profiler!(start "verify quotient's segments");
1269        let powers_of_out_of_domain_point_curr_row = (0..NUM_QUOTIENT_SEGMENTS as u32)
1270            .map(|exponent| out_of_domain_point_curr_row.mod_pow_u32(exponent))
1271            .collect::<Array1<_>>();
1272        let sum_of_evaluated_out_of_domain_quotient_segments =
1273            powers_of_out_of_domain_point_curr_row.dot(&out_of_domain_curr_row_quot_segments);
1274        if out_of_domain_quotient_value != sum_of_evaluated_out_of_domain_quotient_segments {
1275            return Err(VerificationError::OutOfDomainQuotientValueMismatch);
1276        };
1277        profiler!(stop "verify quotient's segments");
1278
1279        profiler!(start "Fiat-Shamir 2" ("hash"));
1280        let weights = LinearCombinationWeights::sample(&mut proof_stream);
1281        let main_and_aux_codeword_weights = weights.main_and_aux();
1282        profiler!(stop "Fiat-Shamir 2");
1283
1284        profiler!(start "sum out-of-domain values" ("CC"));
1285        let out_of_domain_curr_row_main_and_aux_value = Self::linearly_sum_main_and_aux_row(
1286            out_of_domain_curr_main_row.view(),
1287            out_of_domain_curr_aux_row.view(),
1288            main_and_aux_codeword_weights.view(),
1289        );
1290        let out_of_domain_next_row_main_and_aux_value = Self::linearly_sum_main_and_aux_row(
1291            out_of_domain_next_main_row.view(),
1292            out_of_domain_next_aux_row.view(),
1293            main_and_aux_codeword_weights.view(),
1294        );
1295        let out_of_domain_curr_row_quotient_segment_value = weights
1296            .quot_segments
1297            .dot(&out_of_domain_curr_row_quot_segments);
1298        profiler!(stop "sum out-of-domain values");
1299
1300        // verify low degree of combination polynomial with FRI
1301        profiler!(start "FRI");
1302        let revealed_fri_indices_and_elements = fri.verify(&mut proof_stream)?;
1303        let (revealed_current_row_indices, revealed_fri_values): (Vec<_>, Vec<_>) =
1304            revealed_fri_indices_and_elements.into_iter().unzip();
1305        profiler!(stop "FRI");
1306
1307        profiler!(start "check leafs");
1308        profiler!(start "dequeue main elements");
1309        let main_table_rows = proof_stream.dequeue()?.try_into_master_main_table_rows()?;
1310        let main_authentication_structure = proof_stream
1311            .dequeue()?
1312            .try_into_authentication_structure()?;
1313        let leaf_digests_main: Vec<_> = main_table_rows
1314            .par_iter()
1315            .map(|revealed_main_elem| Tip5::hash_varlen(revealed_main_elem))
1316            .collect();
1317        profiler!(stop "dequeue main elements");
1318
1319        let index_leaves = |leaves| {
1320            let index_iter = revealed_current_row_indices.iter().copied();
1321            index_iter.zip_eq(leaves).collect()
1322        };
1323        profiler!(start "Merkle verify (main tree)" ("hash"));
1324        let base_merkle_tree_inclusion_proof = MerkleTreeInclusionProof {
1325            tree_height: merkle_tree_height,
1326            indexed_leafs: index_leaves(leaf_digests_main),
1327            authentication_structure: main_authentication_structure,
1328        };
1329        if !base_merkle_tree_inclusion_proof.verify(main_merkle_tree_root) {
1330            return Err(VerificationError::MainCodewordAuthenticationFailure);
1331        }
1332        profiler!(stop "Merkle verify (main tree)");
1333
1334        profiler!(start "dequeue auxiliary elements");
1335        let aux_table_rows = proof_stream.dequeue()?.try_into_master_aux_table_rows()?;
1336        let aux_authentication_structure = proof_stream
1337            .dequeue()?
1338            .try_into_authentication_structure()?;
1339        let leaf_digests_aux = aux_table_rows
1340            .par_iter()
1341            .map(|xvalues| {
1342                let b_values = xvalues.iter().flat_map(|xfe| xfe.coefficients.to_vec());
1343                Tip5::hash_varlen(&b_values.collect_vec())
1344            })
1345            .collect::<Vec<_>>();
1346        profiler!(stop "dequeue auxiliary elements");
1347
1348        profiler!(start "Merkle verify (auxiliary tree)" ("hash"));
1349        let aux_merkle_tree_inclusion_proof = MerkleTreeInclusionProof {
1350            tree_height: merkle_tree_height,
1351            indexed_leafs: index_leaves(leaf_digests_aux),
1352            authentication_structure: aux_authentication_structure,
1353        };
1354        if !aux_merkle_tree_inclusion_proof.verify(auxiliary_tree_merkle_root) {
1355            return Err(VerificationError::AuxiliaryCodewordAuthenticationFailure);
1356        }
1357        profiler!(stop "Merkle verify (auxiliary tree)");
1358
1359        profiler!(start "dequeue quotient segments' elements");
1360        let revealed_quotient_segments_elements =
1361            proof_stream.dequeue()?.try_into_quot_segments_elements()?;
1362        let revealed_quotient_segments_digests =
1363            Self::hash_quotient_segment_elements(&revealed_quotient_segments_elements);
1364        let revealed_quotient_authentication_structure = proof_stream
1365            .dequeue()?
1366            .try_into_authentication_structure()?;
1367        profiler!(stop "dequeue quotient segments' elements");
1368
1369        profiler!(start "Merkle verify (combined quotient)" ("hash"));
1370        let quot_merkle_tree_inclusion_proof = MerkleTreeInclusionProof {
1371            tree_height: merkle_tree_height,
1372            indexed_leafs: index_leaves(revealed_quotient_segments_digests),
1373            authentication_structure: revealed_quotient_authentication_structure,
1374        };
1375        if !quot_merkle_tree_inclusion_proof.verify(quotient_codeword_merkle_root) {
1376            return Err(VerificationError::QuotientCodewordAuthenticationFailure);
1377        }
1378        profiler!(stop "Merkle verify (combined quotient)");
1379        profiler!(stop "check leafs");
1380
1381        profiler!(start "linear combination");
1382        if self.parameters.num_collinearity_checks != revealed_current_row_indices.len() {
1383            return Err(VerificationError::IncorrectNumberOfRowIndices);
1384        };
1385        if self.parameters.num_collinearity_checks != revealed_fri_values.len() {
1386            return Err(VerificationError::IncorrectNumberOfFRIValues);
1387        };
1388        if self.parameters.num_collinearity_checks != revealed_quotient_segments_elements.len() {
1389            return Err(VerificationError::IncorrectNumberOfQuotientSegmentElements);
1390        };
1391        if self.parameters.num_collinearity_checks != main_table_rows.len() {
1392            return Err(VerificationError::IncorrectNumberOfMainTableRows);
1393        };
1394        if self.parameters.num_collinearity_checks != aux_table_rows.len() {
1395            return Err(VerificationError::IncorrectNumberOfAuxTableRows);
1396        };
1397
1398        for (row_idx, main_row, aux_row, quotient_segments_elements, fri_value) in izip!(
1399            revealed_current_row_indices,
1400            main_table_rows,
1401            aux_table_rows,
1402            revealed_quotient_segments_elements,
1403            revealed_fri_values,
1404        ) {
1405            let main_row = Array1::from(main_row.to_vec());
1406            let aux_row = Array1::from(aux_row.to_vec());
1407            let current_fri_domain_value = fri.domain.domain_value(row_idx as u32);
1408
1409            profiler!(start "main & aux elements" ("CC"));
1410            let main_and_aux_curr_row_element = Self::linearly_sum_main_and_aux_row(
1411                main_row.view(),
1412                aux_row.view(),
1413                main_and_aux_codeword_weights.view(),
1414            );
1415            let quotient_segments_curr_row_element = weights
1416                .quot_segments
1417                .dot(&Array1::from(quotient_segments_elements.to_vec()));
1418            profiler!(stop "main & aux elements");
1419
1420            profiler!(start "DEEP update");
1421            let main_and_aux_curr_row_deep_value = Stark::deep_update(
1422                current_fri_domain_value,
1423                main_and_aux_curr_row_element,
1424                out_of_domain_point_curr_row,
1425                out_of_domain_curr_row_main_and_aux_value,
1426            );
1427            let main_and_aux_next_row_deep_value = Stark::deep_update(
1428                current_fri_domain_value,
1429                main_and_aux_curr_row_element,
1430                out_of_domain_point_next_row,
1431                out_of_domain_next_row_main_and_aux_value,
1432            );
1433            let quot_curr_row_deep_value = Stark::deep_update(
1434                current_fri_domain_value,
1435                quotient_segments_curr_row_element,
1436                out_of_domain_point_curr_row_pow_num_segments,
1437                out_of_domain_curr_row_quotient_segment_value,
1438            );
1439            profiler!(stop "DEEP update");
1440
1441            profiler!(start "combination codeword equality");
1442            let deep_value_components = Array1::from(vec![
1443                main_and_aux_curr_row_deep_value,
1444                main_and_aux_next_row_deep_value,
1445                quot_curr_row_deep_value,
1446            ]);
1447            if fri_value != weights.deep.dot(&deep_value_components) {
1448                return Err(VerificationError::CombinationCodewordMismatch);
1449            };
1450            profiler!(stop "combination codeword equality");
1451        }
1452        profiler!(stop "linear combination");
1453        Ok(())
1454    }
1455
1456    fn hash_quotient_segment_elements(quotient_segment_rows: &[QuotientSegments]) -> Vec<Digest> {
1457        let interpret_xfe_as_bfes = |xfe: XFieldElement| xfe.coefficients.to_vec();
1458        let collect_row_as_bfes = |row: &QuotientSegments| row.map(interpret_xfe_as_bfes).concat();
1459        quotient_segment_rows
1460            .iter()
1461            .map(collect_row_as_bfes)
1462            .map(|row| Tip5::hash_varlen(&row))
1463            .collect()
1464    }
1465
1466    fn linearly_sum_main_and_aux_row<FF>(
1467        main_row: ArrayView1<FF>,
1468        aux_row: ArrayView1<XFieldElement>,
1469        weights: ArrayView1<XFieldElement>,
1470    ) -> XFieldElement
1471    where
1472        FF: FiniteField + Into<XFieldElement>,
1473        XFieldElement: Mul<FF, Output = XFieldElement>,
1474    {
1475        profiler!(start "collect");
1476        let mut row = main_row.map(|&element| element.into());
1477        row.append(ROW_AXIS, aux_row).unwrap();
1478        profiler!(stop "collect");
1479        profiler!(start "inner product");
1480        // todo: Try to get rid of this clone. The alternative line
1481        //   `let main_and_aux_element = (&weights * &summands).sum();`
1482        //   without cloning the weights does not compile for a seemingly
1483        //   nonsensical reason.
1484        let weights = weights.to_owned();
1485        let main_and_aux_element = (weights * row).sum();
1486        profiler!(stop "inner product");
1487        main_and_aux_element
1488    }
1489}
1490
1491impl Stark {
1492    /// # Panics
1493    ///
1494    /// Panics if `log2_of_fri_expansion_factor` is zero.
1495    pub fn new(security_level: usize, log2_of_fri_expansion_factor: usize) -> Self {
1496        assert_ne!(
1497            0, log2_of_fri_expansion_factor,
1498            "FRI expansion factor must be greater than one."
1499        );
1500
1501        let fri_expansion_factor = 1 << log2_of_fri_expansion_factor;
1502        let num_collinearity_checks = security_level / log2_of_fri_expansion_factor;
1503        let num_collinearity_checks = std::cmp::max(num_collinearity_checks, 1);
1504
1505        let num_out_of_domain_rows = 2;
1506        let num_trace_randomizers = num_collinearity_checks
1507            + num_out_of_domain_rows * x_field_element::EXTENSION_DEGREE
1508            + NUM_QUOTIENT_SEGMENTS * x_field_element::EXTENSION_DEGREE;
1509
1510        Stark {
1511            security_level,
1512            fri_expansion_factor,
1513            num_trace_randomizers,
1514            num_collinearity_checks,
1515        }
1516    }
1517
1518    /// Prove the correctness of the given [Claim] using the given
1519    /// [witness](AlgebraicExecutionTrace).
1520    ///
1521    /// This method should be the first option to consider for proving.
1522    /// For more control over the proving process, see [`Prover`].
1523    //
1524    // This pass-through method guarantees fresh prover randomness at each call.
1525    pub fn prove(
1526        &self,
1527        claim: &Claim,
1528        aet: &AlgebraicExecutionTrace,
1529    ) -> Result<Proof, ProvingError> {
1530        Prover::new(*self).prove(claim, aet)
1531    }
1532
1533    /// Verify the accuracy of the given [Claim], supported by the [Proof].
1534    ///
1535    /// See also [`Verifier`].
1536    //
1537    // This pass-through method achieves symmetry with the [`Prover`].
1538    pub fn verify(&self, claim: &Claim, proof: &Proof) -> Result<(), VerificationError> {
1539        Verifier::new(*self).verify(claim, proof)
1540    }
1541
1542    /// The length of the trace-randomized, padded trace.
1543    ///
1544    /// Guaranteed to be a power of two.
1545    pub(crate) fn randomized_trace_len(
1546        padded_height: usize,
1547        num_trace_randomizers: usize,
1548    ) -> usize {
1549        let total_table_length = padded_height + num_trace_randomizers;
1550        total_table_length.next_power_of_two()
1551    }
1552
1553    pub(crate) fn interpolant_degree(padded_height: usize, num_trace_randomizers: usize) -> isize {
1554        (Self::randomized_trace_len(padded_height, num_trace_randomizers) - 1) as isize
1555    }
1556
1557    /// The upper bound to use for the maximum degree the quotients given the
1558    /// length of the trace and the number of trace randomizers. The degree
1559    /// of the quotients depends on the [AIR](air) constraints.
1560    pub fn max_degree(&self, padded_height: usize) -> isize {
1561        let interpolant_degree =
1562            Self::interpolant_degree(padded_height, self.num_trace_randomizers);
1563        let max_constraint_degree_with_origin =
1564            max_degree_with_origin(interpolant_degree, padded_height);
1565        let max_constraint_degree = max_constraint_degree_with_origin.degree as u64;
1566        let min_arithmetic_domain_length_supporting_max_constraint_degree =
1567            max_constraint_degree.next_power_of_two();
1568        let max_degree_supported_by_that_smallest_arithmetic_domain =
1569            min_arithmetic_domain_length_supporting_max_constraint_degree - 1;
1570
1571        max_degree_supported_by_that_smallest_arithmetic_domain as isize
1572    }
1573
1574    /// The parameters for [FRI](Fri). The length of the
1575    /// [FRI domain](ArithmeticDomain) has a major influence on
1576    /// [proving](Prover::prove) time. It is influenced by the length of the
1577    /// [execution trace](AlgebraicExecutionTrace) and the FRI expansion factor,
1578    /// a security parameter.
1579    ///
1580    /// In principle, the FRI domain length is also influenced by the AIR's
1581    /// degree (see [`air::TARGET_DEGREE`]). However, by segmenting the
1582    /// quotient polynomial into `TARGET_DEGREE`-many parts, that influence
1583    /// is mitigated.
1584    pub fn fri(&self, padded_height: usize) -> fri::SetupResult<Fri> {
1585        let fri_domain_length = self.fri_expansion_factor
1586            * Self::randomized_trace_len(padded_height, self.num_trace_randomizers);
1587        let coset_offset = BFieldElement::generator();
1588        let domain = ArithmeticDomain::of_length(fri_domain_length)?.with_offset(coset_offset);
1589
1590        Fri::new(
1591            domain,
1592            self.fri_expansion_factor,
1593            self.num_collinearity_checks,
1594        )
1595    }
1596
1597    /// Given `f(x)` (the in-domain evaluation of polynomial `f` in `x`), the
1598    /// domain point `x` at which polynomial `f` was evaluated, the
1599    /// out-of-domain evaluation `f(α)`, and the out-of-domain domain point
1600    /// `α`, apply the DEEP update: `(f(x) - f(α)) / (x - α)`.
1601    #[inline]
1602    fn deep_update(
1603        in_domain_point: BFieldElement,
1604        in_domain_value: XFieldElement,
1605        out_of_domain_point: XFieldElement,
1606        out_of_domain_value: XFieldElement,
1607    ) -> XFieldElement {
1608        (in_domain_value - out_of_domain_value) / (in_domain_point - out_of_domain_point)
1609    }
1610}
1611
1612impl Default for Stark {
1613    fn default() -> Self {
1614        let log_2_of_fri_expansion_factor = 2;
1615        let security_level = 160;
1616
1617        Self::new(security_level, log_2_of_fri_expansion_factor)
1618    }
1619}
1620
1621impl Default for Prover {
1622    fn default() -> Self {
1623        Self::new(Stark::default())
1624    }
1625}
1626
1627impl<'a> Arbitrary<'a> for Stark {
1628    fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result<Self> {
1629        let security_level = u.int_in_range(1..=490)?;
1630        let log_2_of_fri_expansion_factor = u.int_in_range(1..=8)?;
1631        Ok(Self::new(security_level, log_2_of_fri_expansion_factor))
1632    }
1633}
1634
/// Fiat-Shamir-sampled challenges to compress a row into a single
/// [extension field element][XFieldElement].
///
/// All weights are drawn in one batch by
/// [`LinearCombinationWeights::sample`].
struct LinearCombinationWeights {
    /// of length [`MasterMainTable::NUM_COLUMNS`]
    main: Array1<XFieldElement>,

    /// of length [`MasterAuxTable::NUM_COLUMNS`]
    aux: Array1<XFieldElement>,

    /// of length [`NUM_QUOTIENT_SEGMENTS`]
    quot_segments: Array1<XFieldElement>,

    /// of length [`NUM_DEEP_CODEWORD_COMPONENTS`]
    deep: Array1<XFieldElement>,
}
1650
1651impl LinearCombinationWeights {
1652    const NUM: usize = MasterMainTable::NUM_COLUMNS
1653        + MasterAuxTable::NUM_COLUMNS
1654        + NUM_QUOTIENT_SEGMENTS
1655        + NUM_DEEP_CODEWORD_COMPONENTS;
1656
1657    fn sample(proof_stream: &mut ProofStream) -> Self {
1658        const MAIN_END: usize = MasterMainTable::NUM_COLUMNS;
1659        const AUX_END: usize = MAIN_END + MasterAuxTable::NUM_COLUMNS;
1660        const QUOT_END: usize = AUX_END + NUM_QUOTIENT_SEGMENTS;
1661
1662        let weights = proof_stream.sample_scalars(Self::NUM);
1663
1664        Self {
1665            main: weights[..MAIN_END].to_vec().into(),
1666            aux: weights[MAIN_END..AUX_END].to_vec().into(),
1667            quot_segments: weights[AUX_END..QUOT_END].to_vec().into(),
1668            deep: weights[QUOT_END..].to_vec().into(),
1669        }
1670    }
1671
1672    fn main_and_aux(&self) -> Array1<XFieldElement> {
1673        let main = self.main.clone().into_iter();
1674        main.chain(self.aux.clone()).collect()
1675    }
1676}
1677
1678#[cfg(test)]
1679#[cfg_attr(coverage_nightly, coverage(off))]
1680pub(crate) mod tests {
1681    use std::collections::HashMap;
1682    use std::collections::HashSet;
1683    use std::fmt::Formatter;
1684
1685    use air::AIR;
1686    use air::challenge_id::ChallengeId::StandardInputIndeterminate;
1687    use air::challenge_id::ChallengeId::StandardOutputIndeterminate;
1688    use air::cross_table_argument::CrossTableArg;
1689    use air::cross_table_argument::EvalArg;
1690    use air::cross_table_argument::GrandCrossTableArg;
1691    use air::table::TableId;
1692    use air::table::cascade::CascadeTable;
1693    use air::table::hash::HashTable;
1694    use air::table::jump_stack::JumpStackTable;
1695    use air::table::lookup::LookupTable;
1696    use air::table::op_stack::OpStackTable;
1697    use air::table::processor::ProcessorTable;
1698    use air::table::program::ProgramTable;
1699    use air::table::ram;
1700    use air::table::ram::RamTable;
1701    use air::table::u32::U32Table;
1702    use air::table_column::MasterAuxColumn;
1703    use air::table_column::MasterMainColumn;
1704    use air::table_column::OpStackMainColumn;
1705    use air::table_column::ProcessorAuxColumn::InputTableEvalArg;
1706    use air::table_column::ProcessorAuxColumn::OutputTableEvalArg;
1707    use air::table_column::ProcessorMainColumn;
1708    use air::table_column::RamMainColumn;
1709    use assert2::assert;
1710    use assert2::check;
1711    use assert2::let_assert;
1712    use constraint_circuit::ConstraintCircuitBuilder;
1713    use isa::error::OpStackError;
1714    use isa::instruction::Instruction;
1715    use isa::op_stack::OpStackElement;
1716    use itertools::izip;
1717    use proptest::collection::vec;
1718    use proptest::prelude::*;
1719    use proptest::test_runner::TestCaseResult;
1720    use proptest_arbitrary_interop::arb;
1721    use rand::Rng;
1722    use rand::prelude::*;
1723    use strum::EnumCount;
1724    use strum::IntoEnumIterator;
1725    use test_strategy::proptest;
1726    use thiserror::Error;
1727    use twenty_first::math::other::random_elements;
1728
1729    use super::*;
1730    use crate::PublicInput;
1731    use crate::config::CacheDecision;
1732    use crate::error::InstructionError;
1733    use crate::shared_tests::TestableProgram;
1734    use crate::table::auxiliary_table;
1735    use crate::table::auxiliary_table::Evaluable;
1736    use crate::table::master_table::MasterAuxTable;
1737    use crate::triton_program;
1738    use crate::vm::NonDeterminism;
1739    use crate::vm::VM;
1740    use crate::vm::tests::ProgramForMerkleTreeUpdate;
1741    use crate::vm::tests::ProgramForRecurseOrReturn;
1742    use crate::vm::tests::ProgramForSpongeAndHashInstructions;
1743    use crate::vm::tests::property_based_test_program_for_and;
1744    use crate::vm::tests::property_based_test_program_for_assert_vector;
1745    use crate::vm::tests::property_based_test_program_for_div_mod;
1746    use crate::vm::tests::property_based_test_program_for_eq;
1747    use crate::vm::tests::property_based_test_program_for_is_u32;
1748    use crate::vm::tests::property_based_test_program_for_log2floor;
1749    use crate::vm::tests::property_based_test_program_for_lsb;
1750    use crate::vm::tests::property_based_test_program_for_lt;
1751    use crate::vm::tests::property_based_test_program_for_pop_count;
1752    use crate::vm::tests::property_based_test_program_for_pow;
1753    use crate::vm::tests::property_based_test_program_for_random_ram_access;
1754    use crate::vm::tests::property_based_test_program_for_split;
1755    use crate::vm::tests::property_based_test_program_for_xb_dot_step;
1756    use crate::vm::tests::property_based_test_program_for_xor;
1757    use crate::vm::tests::property_based_test_program_for_xx_dot_step;
1758    use crate::vm::tests::test_program_0_lt_0;
1759    use crate::vm::tests::test_program_claim_in_ram_corresponds_to_currently_running_program;
1760    use crate::vm::tests::test_program_for_add_mul_invert;
1761    use crate::vm::tests::test_program_for_and;
1762    use crate::vm::tests::test_program_for_assert_vector;
1763    use crate::vm::tests::test_program_for_call_recurse_return;
1764    use crate::vm::tests::test_program_for_div_mod;
1765    use crate::vm::tests::test_program_for_divine;
1766    use crate::vm::tests::test_program_for_eq;
1767    use crate::vm::tests::test_program_for_halt;
1768    use crate::vm::tests::test_program_for_hash;
1769    use crate::vm::tests::test_program_for_log2floor;
1770    use crate::vm::tests::test_program_for_lsb;
1771    use crate::vm::tests::test_program_for_lt;
1772    use crate::vm::tests::test_program_for_many_sponge_instructions;
1773    use crate::vm::tests::test_program_for_merkle_step_left_sibling;
1774    use crate::vm::tests::test_program_for_merkle_step_mem_left_sibling;
1775    use crate::vm::tests::test_program_for_merkle_step_mem_right_sibling;
1776    use crate::vm::tests::test_program_for_merkle_step_right_sibling;
1777    use crate::vm::tests::test_program_for_pop_count;
1778    use crate::vm::tests::test_program_for_pow;
1779    use crate::vm::tests::test_program_for_push_pop_dup_swap_nop;
1780    use crate::vm::tests::test_program_for_read_io_write_io;
1781    use crate::vm::tests::test_program_for_recurse_or_return;
1782    use crate::vm::tests::test_program_for_skiz;
1783    use crate::vm::tests::test_program_for_split;
1784    use crate::vm::tests::test_program_for_sponge_instructions;
1785    use crate::vm::tests::test_program_for_sponge_instructions_2;
1786    use crate::vm::tests::test_program_for_starting_with_pop_count;
1787    use crate::vm::tests::test_program_for_write_mem_read_mem;
1788    use crate::vm::tests::test_program_for_x_invert;
1789    use crate::vm::tests::test_program_for_xb_mul;
1790    use crate::vm::tests::test_program_for_xor;
1791    use crate::vm::tests::test_program_for_xx_add;
1792    use crate::vm::tests::test_program_for_xx_mul;
1793    use crate::vm::tests::test_program_hash_nop_nop_lt;
1794
1795    impl Stark {
1796        pub const LOW_SECURITY_LEVEL: usize = 32;
1797
1798        pub fn low_security() -> Self {
1799            let log_expansion_factor = 2;
1800            Stark::new(Self::LOW_SECURITY_LEVEL, log_expansion_factor)
1801        }
1802    }
1803
1804    #[proptest]
1805    fn two_default_provers_have_different_randomness_seeds() {
1806        let seed = || Prover::default().randomness_seed;
1807        prop_assert_ne!(seed(), seed());
1808    }
1809
    /// Computing the quotient segments just in time (no cached FRI-domain
    /// tables) and with fully cached tables must yield identical results.
    #[test]
    fn quotient_segments_are_independent_of_fri_table_caching() {
        // ensure caching _can_ happen by overwriting environment variables
        crate::config::overwrite_lde_trace_caching_to(CacheDecision::Cache);

        let test_program = TestableProgram::new(triton_program!(halt));
        let stark = test_program.stark;
        let artifacts = test_program.generate_proof_artifacts();
        let mut main = artifacts.master_main_table;
        let mut aux = artifacts.master_aux_table;
        let ch = artifacts.challenges;
        let padded_height = main.trace_table().nrows();
        let fri_dom = stark.fri(padded_height).unwrap().domain;
        let max_degree = stark.max_degree(padded_height);
        let num_trace_randos = stark.num_trace_randomizers;
        let domains = ProverDomains::derive(padded_height, num_trace_randos, fri_dom, max_degree);
        let quot_dom = domains.quotient;
        // fixed seed: the two computations only need to share the weights;
        // the concrete values are irrelevant for this test
        let weights = StdRng::seed_from_u64(1632525295622789151)
            .random::<[XFieldElement; MasterAuxTable::NUM_CONSTRAINTS]>();

        // first run: no FRI-domain tables are cached yet
        debug_assert!(main.fri_domain_table().is_none());
        debug_assert!(aux.fri_domain_table().is_none());
        let jit_segments =
            Prover::compute_quotient_segments(&mut main, &mut aux, quot_dom, &ch, &weights);

        // populate both caches by low-degree extending all columns
        debug_assert!(main.fri_domain_table().is_none());
        main.maybe_low_degree_extend_all_columns();
        debug_assert!(main.fri_domain_table().is_some());

        debug_assert!(aux.fri_domain_table().is_none());
        aux.maybe_low_degree_extend_all_columns();
        debug_assert!(aux.fri_domain_table().is_some());

        // second run: identical inputs, but now the cached path is taken
        let cache_segments =
            Prover::compute_quotient_segments(&mut main, &mut aux, quot_dom, &ch, &weights);

        assert_eq!(jit_segments, cache_segments);
    }
1848
    /// [`Prover::compute_quotient_segments`] takes mutable references to
    /// both, the main and the auxiliary tables. It is vital that certain
    /// information is _not_ mutated: this test checks that the execution
    /// traces stay untouched, both with and without LDE-trace caching.
    #[test]
    fn computing_quotient_segments_does_not_change_execution_trace() {
        // Run the quotient-segment computation under the given caching policy
        // and assert that neither trace table was mutated.
        fn assert_no_trace_mutation(cache_decision: CacheDecision) {
            crate::config::overwrite_lde_trace_caching_to(cache_decision);

            let test_program = TestableProgram::new(triton_program!(halt));
            let stark = test_program.stark;
            let artifacts = test_program.generate_proof_artifacts();
            let mut main = artifacts.master_main_table;
            let mut aux = artifacts.master_aux_table;
            let ch = artifacts.challenges;

            // snapshot the traces before any potentially mutating call
            let original_main_trace = main.trace_table().to_owned();
            let original_aux_trace = aux.trace_table().to_owned();

            let padded_height = main.trace_table().nrows();
            let fri_dom = stark.fri(padded_height).unwrap().domain;
            let max_degree = stark.max_degree(padded_height);
            let num_trace_randos = stark.num_trace_randomizers;
            let domains =
                ProverDomains::derive(padded_height, num_trace_randos, fri_dom, max_degree);
            let quot_dom = domains.quotient;

            // pre-populate the FRI-domain caches so the cached code path is
            // actually exercised when caching is requested
            if cache_decision == CacheDecision::Cache {
                main.maybe_low_degree_extend_all_columns();
                assert!(main.fri_domain_table().is_some());

                aux.maybe_low_degree_extend_all_columns();
                assert!(aux.fri_domain_table().is_some());
            }

            // fixed seed: the concrete weights are irrelevant for this test
            let weights = StdRng::seed_from_u64(15157673430940347283)
                .random::<[XFieldElement; MasterAuxTable::NUM_CONSTRAINTS]>();
            let _segments =
                Prover::compute_quotient_segments(&mut main, &mut aux, quot_dom, &ch, &weights);

            assert_eq!(original_main_trace, main.trace_table());
            assert_eq!(original_aux_trace, aux.trace_table());
        }

        assert_no_trace_mutation(CacheDecision::Cache);
        assert_no_trace_mutation(CacheDecision::NoCache);
    }
1895
    /// With a fixed prover randomness seed, the produced proof — and hence
    /// its hash — must be bit-for-bit reproducible across runs.
    #[test]
    fn supplying_prover_randomness_seed_fully_derandomizes_produced_proof() {
        let TestableProgram {
            program,
            public_input,
            non_determinism,
            stark,
        } = program_executing_every_instruction();

        let claim = Claim::about_program(&program).with_input(public_input.clone());
        let (aet, output) = VM::trace_execution(program, public_input, non_determinism).unwrap();
        let claim = claim.with_output(output);

        // fixed seed: all prover randomness is derived from it, which
        // deliberately breaks zero-knowledge but pins the proof bytes
        let mut rng = StdRng::seed_from_u64(3351975627407608972);
        let proof = Prover::new(stark)
            .set_randomness_seed_which_may_break_zero_knowledge(rng.random())
            .prove(&claim, &aet)
            .unwrap();

        // the snapshot fails if the proof generation becomes non-deterministic
        // or the proof layout changes
        insta::assert_snapshot!(
            Tip5::hash(&proof),
            @"17275651906185656762,\
              13250937299792022858,\
              05731754925513787901,\
              05512095638892086027,\
              08634562101877660478",
        );
    }
1924
    /// Print excerpts of the Processor and RAM Tables for a small RAM-heavy
    /// program, formatted as markdown tables. Intended to generate example
    /// tables for the specification; output is only visible when running the
    /// test harness with `--nocapture`.
    #[test]
    fn print_ram_table_example_for_specification() {
        let program = triton_program!(
            push 20 push 100 write_mem 1 pop 1  // write 20 to address 100
            push 5 push 6 push 7 push 8 push 9
            push 42 write_mem 5 pop 1           // write 5..=9 to addresses 42..=46
            push 42 read_mem 1 pop 2            // read from address 42
            push 45 read_mem 3 pop 4            // read from address 42..=44
            push 17 push 18 push 19
            push 43 write_mem 3 pop 1           // write 17..=19 to addresses 43..=45
            push 46 read_mem 5 pop 1 pop 5      // read from addresses 42..=46
            push 42 read_mem 1 pop 2            // read from address 42
            push 100 read_mem 1 pop 2           // read from address 100
            halt
        );
        let master_main_table = TestableProgram::new(program)
            .generate_proof_artifacts()
            .master_main_table;

        println!();
        println!("Processor Table:\n");
        println!("| clk | ci  | nia | st0 | st1 | st2 | st3 | st4 | st5 |");
        println!("|----:|:----|:----|----:|----:|----:|----:|----:|----:|");
        // only the first 40 rows are interesting for the specification
        for row in master_main_table
            .table(TableId::Processor)
            .rows()
            .into_iter()
            .take(40)
        {
            let clk = row[ProcessorMainColumn::CLK.main_index()].to_string();
            let st0 = row[ProcessorMainColumn::ST0.main_index()].to_string();
            let st1 = row[ProcessorMainColumn::ST1.main_index()].to_string();
            let st2 = row[ProcessorMainColumn::ST2.main_index()].to_string();
            let st3 = row[ProcessorMainColumn::ST3.main_index()].to_string();
            let st4 = row[ProcessorMainColumn::ST4.main_index()].to_string();
            let st5 = row[ProcessorMainColumn::ST5.main_index()].to_string();

            let (ci, nia) = ci_and_nia_from_master_table_row(row);

            // right-align every cell to width 10 for readable markdown
            let interesting_cols = [clk, ci, nia, st0, st1, st2, st3, st4, st5];
            let interesting_cols = interesting_cols
                .iter()
                .map(|ff| format!("{:>10}", format!("{ff}")))
                .join(" | ");
            println!("| {interesting_cols} |");
        }
        println!();
        println!("RAM Table:\n");
        println!("| clk | type | pointer | value | iord |");
        println!("|----:|:-----|--------:|------:|-----:|");
        // only the first 25 rows are interesting for the specification
        for row in master_main_table
            .table(TableId::Ram)
            .rows()
            .into_iter()
            .take(25)
        {
            let clk = row[RamMainColumn::CLK.main_index()].to_string();
            let ramp = row[RamMainColumn::RamPointer.main_index()].to_string();
            let ramv = row[RamMainColumn::RamValue.main_index()].to_string();
            let iord = row[RamMainColumn::InverseOfRampDifference.main_index()].to_string();

            // map the instruction-type indicator column to a human-readable
            // label; unknown values are rendered as "-"
            let instruction_type = match row[RamMainColumn::InstructionType.main_index()] {
                ram::INSTRUCTION_TYPE_READ => "read",
                ram::INSTRUCTION_TYPE_WRITE => "write",
                ram::PADDING_INDICATOR => "pad",
                _ => "-",
            }
            .to_string();

            let interesting_cols = [clk, instruction_type, ramp, ramv, iord];
            let interesting_cols = interesting_cols
                .iter()
                .map(|ff| format!("{:>10}", format!("{ff}")))
                .join(" | ");
            println!("| {interesting_cols} |");
        }
    }
2002
    /// Print example Processor and Op Stack tables. Has mainly informative
    /// purpose; run with `-- --nocapture` to see the output.
    #[test]
    fn print_op_stack_table_example_for_specification() {
        let num_interesting_rows = 30;
        // Pretend the op stack has only this many registers (instead of
        // `OpStackElement::COUNT`) to keep the printed tables narrow.
        let fake_op_stack_size = 4;

        let program = triton_program! {
            push 42 push 43 push 44 push 45 push 46 push 47 push 48
            nop pop 1 pop 2 pop 1
            push 77 swap 3 push 78 swap 3 push 79
            pop 1 pop 2 pop 3
            halt
        };
        let master_main_table = TestableProgram::new(program)
            .generate_proof_artifacts()
            .master_main_table;

        println!();
        println!("Processor Table:");
        println!("| clk | ci  | nia | st0 | st1 | st2 | st3 | underflow | pointer |");
        println!("|----:|:----|----:|----:|----:|----:|----:|:----------|--------:|");
        for row in master_main_table
            .table(TableId::Processor)
            .rows()
            .into_iter()
            .take(num_interesting_rows)
        {
            let clk = row[ProcessorMainColumn::CLK.main_index()].to_string();
            let st0 = row[ProcessorMainColumn::ST0.main_index()].to_string();
            let st1 = row[ProcessorMainColumn::ST1.main_index()].to_string();
            let st2 = row[ProcessorMainColumn::ST2.main_index()].to_string();
            let st3 = row[ProcessorMainColumn::ST3.main_index()].to_string();
            let st4 = row[ProcessorMainColumn::ST4.main_index()].to_string();
            let st5 = row[ProcessorMainColumn::ST5.main_index()].to_string();
            let st6 = row[ProcessorMainColumn::ST6.main_index()].to_string();
            let st7 = row[ProcessorMainColumn::ST7.main_index()].to_string();
            let st8 = row[ProcessorMainColumn::ST8.main_index()].to_string();
            let st9 = row[ProcessorMainColumn::ST9.main_index()].to_string();

            // Rebase the recorded stack pointer as if the op stack had only
            // `fake_op_stack_size` registers.
            let osp = row[ProcessorMainColumn::OpStackPointer.main_index()];
            let osp =
                (osp.value() + fake_op_stack_size).saturating_sub(OpStackElement::COUNT as u64);

            // Registers beyond the fake stack size are shown as "underflow".
            let underflow_size = osp.saturating_sub(fake_op_stack_size);
            let underflow_candidates = [st4, st5, st6, st7, st8, st9];
            let underflow = underflow_candidates
                .into_iter()
                .take(underflow_size as usize);
            let underflow = underflow.map(|ff| format!("{:>2}", format!("{ff}")));
            let underflow = format!("[{}]", underflow.collect_vec().join(", "));

            let osp = osp.to_string();
            let (ci, nia) = ci_and_nia_from_master_table_row(row);

            let interesting_cols = [clk, ci, nia, st0, st1, st2, st3, underflow, osp];
            let interesting_cols = interesting_cols
                .map(|ff| format!("{:>10}", format!("{ff}")))
                .join(" | ");
            println!("| {interesting_cols} |");
        }

        println!();
        println!("Op Stack Table:");
        println!("| clk | ib1 | pointer | value |");
        println!("|----:|----:|--------:|------:|");
        for row in master_main_table
            .table(TableId::OpStack)
            .rows()
            .into_iter()
            .take(num_interesting_rows)
        {
            let clk = row[OpStackMainColumn::CLK.main_index()].to_string();
            let ib1 = row[OpStackMainColumn::IB1ShrinkStack.main_index()].to_string();

            // Same stack pointer rebasing as for the Processor Table above.
            let osp = row[OpStackMainColumn::StackPointer.main_index()];
            let osp =
                (osp.value() + fake_op_stack_size).saturating_sub(OpStackElement::COUNT as u64);
            let osp = osp.to_string();

            let value = row[OpStackMainColumn::FirstUnderflowElement.main_index()].to_string();

            let interesting_cols = [clk, ib1, osp, value];
            let interesting_cols = interesting_cols
                .map(|ff| format!("{:>10}", format!("{ff}")))
                .join(" | ");
            println!("| {interesting_cols} |");
        }
    }
2090
2091    fn ci_and_nia_from_master_table_row(row: ArrayView1<BFieldElement>) -> (String, String) {
2092        let curr_instruction = row[ProcessorMainColumn::CI.main_index()].value();
2093        let next_instruction_or_arg = row[ProcessorMainColumn::NIA.main_index()].value();
2094
2095        let curr_instruction = Instruction::try_from(curr_instruction).unwrap();
2096        let nia = curr_instruction
2097            .arg()
2098            .map(|_| next_instruction_or_arg.to_string())
2099            .unwrap_or_default();
2100        (curr_instruction.name().to_string(), nia)
2101    }
2102
2103    /// To be used with `-- --nocapture`. Has mainly informative purpose.
2104    #[test]
2105    fn print_all_constraint_degrees() {
2106        let padded_height = 2;
2107        let num_trace_randomizers = 2;
2108        let interpolant_degree = Stark::interpolant_degree(padded_height, num_trace_randomizers);
2109        for deg in auxiliary_table::all_degrees_with_origin(interpolant_degree, padded_height) {
2110            println!("{deg}");
2111        }
2112    }
2113
2114    #[test]
2115    fn check_io_terminals() {
2116        let read_nop_program = triton_program!(
2117            read_io 3 nop nop write_io 2 push 17 write_io 1 halt
2118        );
2119        let artifacts = TestableProgram::new(read_nop_program)
2120            .with_input(bfe_vec![3, 5, 7])
2121            .generate_proof_artifacts();
2122
2123        let processor_table = artifacts.master_aux_table.table(TableId::Processor);
2124        let processor_table_last_row = processor_table.slice(s![-1, ..]);
2125        let ptie = processor_table_last_row[InputTableEvalArg.aux_index()];
2126        let ine = EvalArg::compute_terminal(
2127            &artifacts.claim.input,
2128            EvalArg::default_initial(),
2129            artifacts.challenges[StandardInputIndeterminate],
2130        );
2131        check!(ptie == ine);
2132
2133        let ptoe = processor_table_last_row[OutputTableEvalArg.aux_index()];
2134        let oute = EvalArg::compute_terminal(
2135            &artifacts.claim.output,
2136            EvalArg::default_initial(),
2137            artifacts.challenges[StandardOutputIndeterminate],
2138        );
2139        check!(ptoe == oute);
2140    }
2141
2142    #[test]
2143    fn constraint_polynomials_use_right_number_of_variables() {
2144        let challenges = Challenges::default();
2145        let main_row = Array1::<BFieldElement>::zeros(MasterMainTable::NUM_COLUMNS);
2146        let aux_row = Array1::zeros(MasterAuxTable::NUM_COLUMNS);
2147
2148        let br = main_row.view();
2149        let er = aux_row.view();
2150
2151        MasterAuxTable::evaluate_initial_constraints(br, er, &challenges);
2152        MasterAuxTable::evaluate_consistency_constraints(br, er, &challenges);
2153        MasterAuxTable::evaluate_transition_constraints(br, er, br, er, &challenges);
2154        MasterAuxTable::evaluate_terminal_constraints(br, er, &challenges);
2155    }
2156
    /// Count the constraints of every table and print a per-table summary.
    /// Run with `-- --nocapture` to see the output.
    #[test]
    fn print_number_of_all_constraints_per_table() {
        let table_names = [
            "program table",
            "processor table",
            "op stack table",
            "ram table",
            "jump stack table",
            "hash table",
            "cascade table",
            "lookup table",
            "u32 table",
            "cross-table arg",
        ];
        // One fresh builder per constraint kind; the constraints only need to
        // be built in order to be counted.
        let circuit_builder = ConstraintCircuitBuilder::new();
        let all_init = [
            ProgramTable::initial_constraints(&circuit_builder),
            ProcessorTable::initial_constraints(&circuit_builder),
            OpStackTable::initial_constraints(&circuit_builder),
            RamTable::initial_constraints(&circuit_builder),
            JumpStackTable::initial_constraints(&circuit_builder),
            HashTable::initial_constraints(&circuit_builder),
            CascadeTable::initial_constraints(&circuit_builder),
            LookupTable::initial_constraints(&circuit_builder),
            U32Table::initial_constraints(&circuit_builder),
            GrandCrossTableArg::initial_constraints(&circuit_builder),
        ]
        .map(|vec| vec.len());
        let circuit_builder = ConstraintCircuitBuilder::new();
        let all_cons = [
            ProgramTable::consistency_constraints(&circuit_builder),
            ProcessorTable::consistency_constraints(&circuit_builder),
            OpStackTable::consistency_constraints(&circuit_builder),
            RamTable::consistency_constraints(&circuit_builder),
            JumpStackTable::consistency_constraints(&circuit_builder),
            HashTable::consistency_constraints(&circuit_builder),
            CascadeTable::consistency_constraints(&circuit_builder),
            LookupTable::consistency_constraints(&circuit_builder),
            U32Table::consistency_constraints(&circuit_builder),
            GrandCrossTableArg::consistency_constraints(&circuit_builder),
        ]
        .map(|vec| vec.len());
        let circuit_builder = ConstraintCircuitBuilder::new();
        let all_trans = [
            ProgramTable::transition_constraints(&circuit_builder),
            ProcessorTable::transition_constraints(&circuit_builder),
            OpStackTable::transition_constraints(&circuit_builder),
            RamTable::transition_constraints(&circuit_builder),
            JumpStackTable::transition_constraints(&circuit_builder),
            HashTable::transition_constraints(&circuit_builder),
            CascadeTable::transition_constraints(&circuit_builder),
            LookupTable::transition_constraints(&circuit_builder),
            U32Table::transition_constraints(&circuit_builder),
            GrandCrossTableArg::transition_constraints(&circuit_builder),
        ]
        .map(|vec| vec.len());
        let circuit_builder = ConstraintCircuitBuilder::new();
        let all_term = [
            ProgramTable::terminal_constraints(&circuit_builder),
            ProcessorTable::terminal_constraints(&circuit_builder),
            OpStackTable::terminal_constraints(&circuit_builder),
            RamTable::terminal_constraints(&circuit_builder),
            JumpStackTable::terminal_constraints(&circuit_builder),
            HashTable::terminal_constraints(&circuit_builder),
            CascadeTable::terminal_constraints(&circuit_builder),
            LookupTable::terminal_constraints(&circuit_builder),
            U32Table::terminal_constraints(&circuit_builder),
            GrandCrossTableArg::terminal_constraints(&circuit_builder),
        ]
        .map(|vec| vec.len());

        // Column and grand totals for the summary's last row and column.
        let num_total_init: usize = all_init.iter().sum();
        let num_total_cons: usize = all_cons.iter().sum();
        let num_total_trans: usize = all_trans.iter().sum();
        let num_total_term: usize = all_term.iter().sum();
        let num_total = num_total_init + num_total_cons + num_total_trans + num_total_term;

        println!();
        println!("| Table                |  Init |  Cons | Trans |  Term |   Sum |");
        println!("|:---------------------|------:|------:|------:|------:|------:|");
        for (name, num_init, num_cons, num_trans, num_term) in
            izip!(table_names, all_init, all_cons, all_trans, all_term)
        {
            let num_total = num_init + num_cons + num_trans + num_term;
            println!(
                "| {name:<20} | {num_init:>5} | {num_cons:>5} \
                 | {num_trans:>5} | {num_term:>5} | {num_total:>5} |",
            );
        }
        println!(
            "| {:<20} | {num_total_init:>5} | {num_total_cons:>5} \
             | {num_total_trans:>5} | {num_total_term:>5} | {num_total:>5} |",
            "Sum",
        );
    }
2252
2253    #[test]
2254    fn number_of_quotient_degree_bounds_match_number_of_constraints() {
2255        let main_row = Array1::<BFieldElement>::zeros(MasterMainTable::NUM_COLUMNS);
2256        let aux_row = Array1::zeros(MasterAuxTable::NUM_COLUMNS);
2257        let ch = Challenges::default();
2258        let padded_height = 2;
2259        let num_trace_randomizers = 2;
2260        let interpolant_degree = Stark::interpolant_degree(padded_height, num_trace_randomizers);
2261
2262        // Shorten some names for better formatting. This is just a test.
2263        let ph = padded_height;
2264        let id = interpolant_degree;
2265        let mr = main_row.view();
2266        let ar = aux_row.view();
2267
2268        let num_init_quots = MasterAuxTable::NUM_INITIAL_CONSTRAINTS;
2269        let num_cons_quots = MasterAuxTable::NUM_CONSISTENCY_CONSTRAINTS;
2270        let num_tran_quots = MasterAuxTable::NUM_TRANSITION_CONSTRAINTS;
2271        let num_term_quots = MasterAuxTable::NUM_TERMINAL_CONSTRAINTS;
2272
2273        let eval_init_consts = MasterAuxTable::evaluate_initial_constraints(mr, ar, &ch);
2274        let eval_cons_consts = MasterAuxTable::evaluate_consistency_constraints(mr, ar, &ch);
2275        let eval_tran_consts = MasterAuxTable::evaluate_transition_constraints(mr, ar, mr, ar, &ch);
2276        let eval_term_consts = MasterAuxTable::evaluate_terminal_constraints(mr, ar, &ch);
2277
2278        assert!(num_init_quots == eval_init_consts.len());
2279        assert!(num_cons_quots == eval_cons_consts.len());
2280        assert!(num_tran_quots == eval_tran_consts.len());
2281        assert!(num_term_quots == eval_term_consts.len());
2282
2283        assert!(num_init_quots == MasterAuxTable::initial_quotient_degree_bounds(id).len());
2284        assert!(num_cons_quots == MasterAuxTable::consistency_quotient_degree_bounds(id, ph).len());
2285        assert!(num_tran_quots == MasterAuxTable::transition_quotient_degree_bounds(id, ph).len());
2286        assert!(num_term_quots == MasterAuxTable::terminal_quotient_degree_bounds(id).len());
2287    }
2288
    /// Outcome of checking the constraints of one or more tables.
    type ConstraintResult = Result<(), ConstraintErrorCollection>;

    /// Every constraint violation found in a single table, collected so that a
    /// test failure reports all offending constraints, not just the first one.
    #[derive(Debug, Clone, Eq, PartialEq)]
    struct ConstraintErrorCollection {
        table: &'static str,
        errors: Vec<ConstraintError>,
    }
2296
2297    impl ConstraintErrorCollection {
2298        fn new(table: &'static str) -> Self {
2299            let errors = Vec::new();
2300            Self { table, errors }
2301        }
2302
2303        fn record(&mut self, err: ConstraintError) {
2304            self.errors.push(err);
2305        }
2306
2307        fn into_result(self) -> Result<(), Self> {
2308            self.errors.is_empty().then_some(()).ok_or(self)
2309        }
2310    }
2311
2312    impl core::fmt::Display for ConstraintErrorCollection {
2313        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
2314            for err in &self.errors {
2315                writeln!(f, "{table}: {err}", table = self.table)?;
2316            }
2317
2318            Ok(())
2319        }
2320    }
2321
    // Marker impl: `Error` needs nothing beyond the `Debug` and `Display`
    // implementations the type already has.
    impl core::error::Error for ConstraintErrorCollection {}
2323
2324    #[derive(Debug, Copy, Clone, Eq, PartialEq, Error)]
2325    enum ConstraintError {
2326        #[error("initial constraint {idx} failed")]
2327        Initial { idx: usize },
2328
2329        #[error("consistency constraint {idx} failed on row {row_idx}")]
2330        Consistency { idx: usize, row_idx: usize },
2331
2332        #[error("transition constraint {idx} failed on row {row_idx}")]
2333        Transition { idx: usize, row_idx: usize },
2334
2335        #[error("terminal constraint {idx} failed.")]
2336        Terminal { idx: usize },
2337    }
2338
    /// Generate a function `$fn_name` that evaluates all four kinds of AIR
    /// constraints of `$table` on a pair of (main, aux) trace tables and
    /// collects every non-zero evaluation into a `ConstraintErrorCollection`.
    macro_rules! check_constraints_fn {
        (fn $fn_name:ident for $table:ident) => {
            fn $fn_name(
                master_main_trace_table: ArrayView2<BFieldElement>,
                master_aux_trace_table: ArrayView2<XFieldElement>,
                challenges: &Challenges,
            ) -> ConstraintResult {
                assert!(master_main_trace_table.nrows() == master_aux_trace_table.nrows());
                let challenges = &challenges.challenges;

                let mut errors = ConstraintErrorCollection::new(stringify!($table));
                // Initial constraints apply to the first row only.
                let builder = ConstraintCircuitBuilder::new();
                for (idx, constraint) in $table::initial_constraints(&builder)
                    .into_iter()
                    .map(|constraint_monad| constraint_monad.consume())
                    .enumerate()
                {
                    let evaluated_constraint = constraint.evaluate(
                        master_main_trace_table.slice(s![..1, ..]),
                        master_aux_trace_table.slice(s![..1, ..]),
                        challenges,
                    );
                    if evaluated_constraint != xfe!(0) {
                        errors.record(ConstraintError::Initial { idx });
                    }
                }

                // Consistency constraints apply to every single row.
                let builder = ConstraintCircuitBuilder::new();
                for (idx, constraint) in $table::consistency_constraints(&builder)
                    .into_iter()
                    .map(|constraint_monad| constraint_monad.consume())
                    .enumerate()
                {
                    for row_idx in 0..master_main_trace_table.nrows() {
                        let evaluated_constraint = constraint.evaluate(
                            master_main_trace_table.slice(s![row_idx..=row_idx, ..]),
                            master_aux_trace_table.slice(s![row_idx..=row_idx, ..]),
                            challenges,
                        );
                        if evaluated_constraint != xfe!(0) {
                            errors.record(ConstraintError::Consistency { idx, row_idx });
                        }
                    }
                }

                // Transition constraints apply to every pair of consecutive rows.
                let builder = ConstraintCircuitBuilder::new();
                for (idx, constraint) in $table::transition_constraints(&builder)
                    .into_iter()
                    .map(|constraint_monad| constraint_monad.consume())
                    .enumerate()
                {
                    for row_idx in 0..master_main_trace_table.nrows() - 1 {
                        let evaluated_constraint = constraint.evaluate(
                            master_main_trace_table.slice(s![row_idx..=row_idx + 1, ..]),
                            master_aux_trace_table.slice(s![row_idx..=row_idx + 1, ..]),
                            challenges,
                        );
                        if evaluated_constraint != xfe!(0) {
                            errors.record(ConstraintError::Transition { idx, row_idx });
                        }
                    }
                }

                // Terminal constraints apply to the last row only.
                let builder = ConstraintCircuitBuilder::new();
                for (idx, constraint) in $table::terminal_constraints(&builder)
                    .into_iter()
                    .map(|constraint_monad| constraint_monad.consume())
                    .enumerate()
                {
                    let evaluated_constraint = constraint.evaluate(
                        master_main_trace_table.slice(s![-1.., ..]),
                        master_aux_trace_table.slice(s![-1.., ..]),
                        challenges,
                    );
                    if evaluated_constraint != xfe!(0) {
                        errors.record(ConstraintError::Terminal { idx });
                    }
                }

                errors.into_result()
            }
        };
    }
2422
    // Instantiate one constraint-checking function per table, plus one for
    // the Grand Cross-Table Argument.
    check_constraints_fn!(fn check_program_table_constraints for ProgramTable);
    check_constraints_fn!(fn check_processor_table_constraints for ProcessorTable);
    check_constraints_fn!(fn check_op_stack_table_constraints for OpStackTable);
    check_constraints_fn!(fn check_ram_table_constraints for RamTable);
    check_constraints_fn!(fn check_jump_stack_table_constraints for JumpStackTable);
    check_constraints_fn!(fn check_hash_table_constraints for HashTable);
    check_constraints_fn!(fn check_cascade_table_constraints for CascadeTable);
    check_constraints_fn!(fn check_lookup_table_constraints for LookupTable);
    check_constraints_fn!(fn check_u32_table_constraints for U32Table);
    check_constraints_fn!(fn check_cross_table_constraints for GrandCrossTableArg);
2433
2434    fn triton_constraints_evaluate_to_zero(program: TestableProgram) -> ConstraintResult {
2435        let artifacts = program.generate_proof_artifacts();
2436        let mmt = artifacts.master_main_table.trace_table();
2437        let mat = artifacts.master_aux_table.trace_table();
2438        assert!(mmt.nrows() == mat.nrows());
2439
2440        let challenges = artifacts.challenges;
2441        check_program_table_constraints(mmt, mat, &challenges)?;
2442        check_processor_table_constraints(mmt, mat, &challenges)?;
2443        check_op_stack_table_constraints(mmt, mat, &challenges)?;
2444        check_ram_table_constraints(mmt, mat, &challenges)?;
2445        check_jump_stack_table_constraints(mmt, mat, &challenges)?;
2446        check_hash_table_constraints(mmt, mat, &challenges)?;
2447        check_cascade_table_constraints(mmt, mat, &challenges)?;
2448        check_lookup_table_constraints(mmt, mat, &challenges)?;
2449        check_u32_table_constraints(mmt, mat, &challenges)?;
2450        check_cross_table_constraints(mmt, mat, &challenges)?;
2451
2452        Ok(())
2453    }
2454
    // Each of the following tests runs one specific program -- chosen to
    // exercise particular instructions -- through
    // `triton_constraints_evaluate_to_zero`.

    #[test]
    fn constraints_evaluate_to_zero_on_fibonacci() -> ConstraintResult {
        let program = TestableProgram::new(crate::example_programs::FIBONACCI_SEQUENCE.clone())
            .with_input(bfe_array![100]);
        triton_constraints_evaluate_to_zero(program)
    }

    #[test]
    fn constraints_evaluate_to_zero_on_big_mmr_snippet() -> ConstraintResult {
        let program = TestableProgram::new(
            crate::example_programs::CALCULATE_NEW_MMR_PEAKS_FROM_APPEND_WITH_SAFE_LISTS.clone(),
        );
        triton_constraints_evaluate_to_zero(program)
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_halt() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_halt())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_hash_nop_nop_lt() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_hash_nop_nop_lt())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_push_pop_dup_swap_nop() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_push_pop_dup_swap_nop())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_divine() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_divine())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_skiz() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_skiz())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_call_recurse_return() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_call_recurse_return())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_recurse_or_return() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_recurse_or_return())
    }

    #[proptest(cases = 20)]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_recurse_or_return(
        program: ProgramForRecurseOrReturn,
    ) {
        triton_constraints_evaluate_to_zero(program.assemble())?;
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_write_mem_read_mem() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_write_mem_read_mem())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_hash() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_hash())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_merkle_step_right_sibling() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_merkle_step_right_sibling())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_merkle_step_left_sibling() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_merkle_step_left_sibling())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_merkle_step_mem_right_sibling()
    -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_merkle_step_mem_right_sibling())
    }

    // todo: https://github.com/rust-lang/rustfmt/issues/6521
    #[rustfmt::skip]
    #[test]
    fn constraints_evaluate_to_zero_on_program_for_merkle_step_mem_left_sibling()
    -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_merkle_step_mem_left_sibling())
    }

    #[proptest(cases = 20)]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_merkle_tree_update(
        program: ProgramForMerkleTreeUpdate,
    ) {
        triton_constraints_evaluate_to_zero(program.assemble())?;
    }
2552
    // More of the same recipe: one hand-picked program per instruction family.

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_assert_vector() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_assert_vector())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_sponge_instructions() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_sponge_instructions())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_sponge_instructions_2() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_sponge_instructions_2())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_many_sponge_instructions() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_many_sponge_instructions())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_add_mul_invert() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_add_mul_invert())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_eq() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_eq())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_lsb() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_lsb())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_split() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_split())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_0_lt_0() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_0_lt_0())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_lt() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_lt())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_and() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_and())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_xor() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_xor())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_log2floor() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_log2floor())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_pow() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_pow())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_div_mod() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_div_mod())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_starting_with_pop_count() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_starting_with_pop_count())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_pop_count() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_pop_count())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_xx_add() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_xx_add())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_xx_mul() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_xx_mul())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_x_invert() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_x_invert())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_xb_mul() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_xb_mul())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_program_for_read_io_write_io() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(test_program_for_read_io_write_io())
    }
2662
    // Property-based variants: the programs are generated (or parameterized)
    // rather than fully hand-written.

    #[test]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_assert_vector()
    -> ConstraintResult {
        triton_constraints_evaluate_to_zero(property_based_test_program_for_assert_vector())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_single_sponge_absorb_mem_instructions() -> ConstraintResult {
        let program = triton_program!(sponge_init sponge_absorb_mem halt);
        let program = TestableProgram::new(program);
        triton_constraints_evaluate_to_zero(program)
    }

    #[proptest(cases = 3)]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_sponge_instructions(
        program: ProgramForSpongeAndHashInstructions,
    ) {
        triton_constraints_evaluate_to_zero(program.assemble())?;
    }

    #[test]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_split() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(property_based_test_program_for_split())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_eq() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(property_based_test_program_for_eq())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_lsb() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(property_based_test_program_for_lsb())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_lt() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(property_based_test_program_for_lt())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_and() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(property_based_test_program_for_and())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_xor() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(property_based_test_program_for_xor())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_log2floor()
    -> ConstraintResult {
        triton_constraints_evaluate_to_zero(property_based_test_program_for_log2floor())
    }

    #[test]
    fn constraints_evaluate_to_zero_on_property_based_test_program_for_pow() -> ConstraintResult {
        triton_constraints_evaluate_to_zero(property_based_test_program_for_pow())
    }
2723
2724    #[test]
2725    fn constraints_evaluate_to_zero_on_property_based_test_program_for_div_mod() -> ConstraintResult
2726    {
2727        triton_constraints_evaluate_to_zero(property_based_test_program_for_div_mod())
2728    }
2729
2730    #[test]
2731    fn constraints_evaluate_to_zero_on_property_based_test_program_for_pop_count()
2732    -> ConstraintResult {
2733        triton_constraints_evaluate_to_zero(property_based_test_program_for_pop_count())
2734    }
2735
2736    #[test]
2737    fn constraints_evaluate_to_zero_on_property_based_test_program_for_is_u32() -> ConstraintResult
2738    {
2739        triton_constraints_evaluate_to_zero(property_based_test_program_for_is_u32())
2740    }
2741
2742    #[test]
2743    fn constraints_evaluate_to_zero_on_property_based_test_program_for_random_ram_access()
2744    -> ConstraintResult {
2745        triton_constraints_evaluate_to_zero(property_based_test_program_for_random_ram_access())
2746    }
2747
2748    #[test]
2749    fn constraints_evaluate_to_zero_on_property_based_test_program_for_xx_dot_step()
2750    -> ConstraintResult {
2751        triton_constraints_evaluate_to_zero(property_based_test_program_for_xx_dot_step())
2752    }
2753
2754    #[test]
2755    fn constraints_evaluate_to_zero_on_property_based_test_program_for_xb_dot_step()
2756    -> ConstraintResult {
2757        triton_constraints_evaluate_to_zero(property_based_test_program_for_xb_dot_step())
2758    }
2759
2760    #[test]
2761    fn can_read_twice_from_same_ram_address_within_one_cycle() -> ConstraintResult {
2762        for i in 0..x_field_element::EXTENSION_DEGREE {
2763            // This program reads from the same address twice, even if the stack
2764            // is not well-initialized.
2765            let program = triton_program! {
2766                dup 0
2767                addi {i}
2768                xx_dot_step
2769                halt
2770            };
2771            let program = TestableProgram::new(program);
2772            debug_assert!(program.clone().run().is_ok());
2773            triton_constraints_evaluate_to_zero(program)?;
2774        }
2775
2776        Ok(())
2777    }
2778
2779    #[test]
2780    fn claim_in_ram_corresponds_to_currently_running_program() -> ConstraintResult {
2781        triton_constraints_evaluate_to_zero(
2782            test_program_claim_in_ram_corresponds_to_currently_running_program(),
2783        )
2784    }
2785
2786    #[test]
2787    fn derived_constraints_evaluate_to_zero_on_halt() {
2788        derived_constraints_evaluate_to_zero(test_program_for_halt());
2789    }
2790
2791    fn derived_constraints_evaluate_to_zero(program: TestableProgram) {
2792        let artifacts = program.generate_proof_artifacts();
2793        let master_main_trace_table = artifacts.master_main_table.trace_table();
2794        let master_aux_trace_table = artifacts.master_aux_table.trace_table();
2795        let challenges = artifacts.challenges;
2796
2797        let evaluated_initial_constraints = MasterAuxTable::evaluate_initial_constraints(
2798            master_main_trace_table.row(0),
2799            master_aux_trace_table.row(0),
2800            &challenges,
2801        );
2802        for (constraint_idx, evaluated_constraint) in
2803            evaluated_initial_constraints.into_iter().enumerate()
2804        {
2805            assert!(
2806                xfe!(0) == evaluated_constraint,
2807                "Initial constraint {constraint_idx} failed.",
2808            );
2809        }
2810
2811        for row_idx in 0..master_main_trace_table.nrows() {
2812            let evaluated_consistency_constraints =
2813                MasterAuxTable::evaluate_consistency_constraints(
2814                    master_main_trace_table.row(row_idx),
2815                    master_aux_trace_table.row(row_idx),
2816                    &challenges,
2817                );
2818            for (constraint_idx, evaluated_constraint) in
2819                evaluated_consistency_constraints.into_iter().enumerate()
2820            {
2821                assert!(
2822                    xfe!(0) == evaluated_constraint,
2823                    "Consistency constraint {constraint_idx} failed in row {row_idx}.",
2824                );
2825            }
2826        }
2827
2828        for curr_row_idx in 0..master_main_trace_table.nrows() - 1 {
2829            let next_row_idx = curr_row_idx + 1;
2830            let evaluated_transition_constraints = MasterAuxTable::evaluate_transition_constraints(
2831                master_main_trace_table.row(curr_row_idx),
2832                master_aux_trace_table.row(curr_row_idx),
2833                master_main_trace_table.row(next_row_idx),
2834                master_aux_trace_table.row(next_row_idx),
2835                &challenges,
2836            );
2837            for (constraint_idx, evaluated_constraint) in
2838                evaluated_transition_constraints.into_iter().enumerate()
2839            {
2840                assert!(
2841                    xfe!(0) == evaluated_constraint,
2842                    "Transition constraint {constraint_idx} failed in row {curr_row_idx}.",
2843                );
2844            }
2845        }
2846
2847        let evaluated_terminal_constraints = MasterAuxTable::evaluate_terminal_constraints(
2848            master_main_trace_table.row(master_main_trace_table.nrows() - 1),
2849            master_aux_trace_table.row(master_aux_trace_table.nrows() - 1),
2850            &challenges,
2851        );
2852        for (constraint_idx, evaluated_constraint) in
2853            evaluated_terminal_constraints.into_iter().enumerate()
2854        {
2855            assert!(
2856                xfe!(0) == evaluated_constraint,
2857                "Terminal constraint {constraint_idx} failed.",
2858            );
2859        }
2860    }
2861
2862    #[test]
2863    fn prove_and_verify_simple_program() {
2864        test_program_hash_nop_nop_lt().prove_and_verify();
2865    }
2866
2867    #[proptest(cases = 10)]
2868    fn prove_and_verify_halt_with_different_stark_parameters(#[strategy(arb())] stark: Stark) {
2869        test_program_for_halt().use_stark(stark).prove_and_verify();
2870    }
2871
2872    #[test]
2873    fn prove_and_verify_fibonacci_100() {
2874        TestableProgram::new(crate::example_programs::FIBONACCI_SEQUENCE.clone())
2875            .with_input(PublicInput::from(bfe_array![100]))
2876            .prove_and_verify();
2877    }
2878
2879    #[test]
2880    fn prove_verify_program_using_pick_and_place() {
2881        let input = bfe_vec![6, 3, 7, 5, 1, 2, 4, 4, 7, 3, 6, 1, 5, 2];
2882        let program = triton_program! {       // i: 13 12 11 10  9  8  7  6  5  4  3  2  1  0
2883            read_io 5 read_io 5 read_io 4     //  _  6  3  7  5 ›1‹ 2  4  4  7  3  6 ›1‹ 5  2
2884            pick 2 pick 9 place 13 place 13   //  _  1  1  6  3  7  5 ›2‹ 4  4  7  3  6  5 ›2‹
2885            pick 0 pick 7 place 13 place 13   //  _  2  2  1  1  6 ›3‹ 7  5  4  4  7 ›3‹ 6  5
2886            pick 2 pick 8 place 13 place 13   //  _  3  3  2  2  1  1  6  7  5 ›4‹›4‹ 7  6  5
2887            pick 3 pick 4 place 13 place 13   //  _  4  4  3  3  2  2  1  1  6  7 ›5‹ 7  6 ›5‹
2888            pick 0 pick 3 place 13 place 13   //  _  5  5  4  4  3  3  2  2  1  1 ›6‹ 7  7 ›6‹
2889            pick 0 pick 3 place 13 place 13   //  _  6  6  5  5  4  4  3  3  2  2  1  1 ›7‹›7‹
2890            pick 1 pick 1 place 13 place 13   //  _  7  7  6  6  5  5  4  4  3  3  2  2  1  1
2891            write_io 5 write_io 5 write_io 4  //  _
2892            halt
2893        };
2894
2895        let program = TestableProgram::new(program).with_input(input);
2896        let output = program.clone().run().unwrap();
2897        let expected_output = bfe_vec![1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7];
2898        assert!(expected_output == output);
2899
2900        program.prove_and_verify();
2901    }
2902
2903    #[test]
2904    fn constraints_evaluate_to_zero_on_many_u32_operations() -> ConstraintResult {
2905        let many_u32_instructions = TestableProgram::new(
2906            crate::example_programs::PROGRAM_WITH_MANY_U32_INSTRUCTIONS.clone(),
2907        );
2908        triton_constraints_evaluate_to_zero(many_u32_instructions)
2909    }
2910
2911    #[test]
2912    fn prove_verify_many_u32_operations() {
2913        TestableProgram::new(crate::example_programs::PROGRAM_WITH_MANY_U32_INSTRUCTIONS.clone())
2914            .prove_and_verify();
2915    }
2916
2917    #[proptest]
2918    fn verifying_arbitrary_proof_does_not_panic(
2919        #[strategy(arb())] stark: Stark,
2920        #[strategy(arb())] claim: Claim,
2921        #[strategy(arb())] proof: Proof,
2922    ) {
2923        let _verdict = stark.verify(&claim, &proof);
2924    }
2925
    /// Instruction `log_2_floor` requires its operand to fit into a u32. A
    /// larger operand must crash the VM with a `FailedU32Conversion` error
    /// that reports the offending element.
    #[proptest]
    fn negative_log_2_floor(
        #[strategy(arb())]
        #[filter(#st0.value() > u64::from(u32::MAX))]
        st0: BFieldElement,
    ) {
        let program = triton_program!(push {st0} log_2_floor halt);
        let_assert!(Err(err) = VM::run(program, [].into(), [].into()));
        let_assert!(InstructionError::OpStackError(err) = err.source);
        let_assert!(OpStackError::FailedU32Conversion(element) = err);
        // The reported element is the original operand.
        assert!(st0 == element);
    }

    /// The logarithm of 0 is undefined: executing `log_2_floor` on operand 0
    /// must crash the VM with a dedicated error.
    #[test]
    fn negative_log_2_floor_of_0() {
        let program = triton_program!(push 0 log_2_floor halt);
        let_assert!(Err(err) = VM::run(program, [].into(), [].into()));
        let_assert!(InstructionError::LogarithmOfZero = err.source);
    }
2945
2946    #[test]
2947    fn deep_update() {
2948        let domain_length = 1 << 10;
2949        let domain = ArithmeticDomain::of_length(domain_length).unwrap();
2950
2951        let poly_degree = rand::rng().random_range(2..20);
2952        let low_deg_poly_coeffs: Vec<XFieldElement> = random_elements(poly_degree);
2953        let low_deg_poly = Polynomial::new(low_deg_poly_coeffs.clone());
2954        let low_deg_codeword = domain.evaluate(&low_deg_poly);
2955
2956        let out_of_domain_point: XFieldElement = rand::rng().random();
2957        let out_of_domain_value = low_deg_poly.evaluate(out_of_domain_point);
2958
2959        let deep_poly = Prover::deep_codeword(
2960            &low_deg_codeword,
2961            domain,
2962            out_of_domain_point,
2963            out_of_domain_value,
2964        );
2965        let poly_of_maybe_low_degree = domain.interpolate(&deep_poly);
2966        assert!(poly_degree as isize - 2 == poly_of_maybe_low_degree.degree());
2967
2968        let bogus_out_of_domain_value = rand::rng().random();
2969        let bogus_deep_poly = Prover::deep_codeword(
2970            &low_deg_codeword,
2971            domain,
2972            out_of_domain_point,
2973            bogus_out_of_domain_value,
2974        );
2975        let poly_of_hopefully_high_degree = domain.interpolate(&bogus_deep_poly);
2976        assert!(domain_length as isize - 1 == poly_of_hopefully_high_degree.degree());
2977    }
2978
2979    /// Re-compose the segments of a polynomial and assert that the result is
2980    /// equal to the polynomial itself. Uses the Schwartz-Zippel lemma to
2981    /// test polynomial equality.
2982    fn assert_polynomial_equals_recomposed_segments<const N: usize, FF: FiniteField>(
2983        f: &Polynomial<FF>,
2984        segments: &[Polynomial<FF>; N],
2985        x: FF,
2986    ) {
2987        let x_pow_n = x.mod_pow_u32(N as u32);
2988        let evaluate_segment = |(segment_idx, segment): (_, &Polynomial<_>)| {
2989            segment.evaluate::<_, FF>(x_pow_n) * x.mod_pow_u32(segment_idx as u32)
2990        };
2991        let evaluated_segments = segments.iter().enumerate().map(evaluate_segment);
2992        let sum_of_evaluated_segments = evaluated_segments.fold(FF::zero(), |acc, x| acc + x);
2993        assert!(f.evaluate::<_, FF>(x) == sum_of_evaluated_segments);
2994    }
2995
2996    fn assert_segments_degrees_are_small_enough<const N: usize, FF: FiniteField>(
2997        f: &Polynomial<FF>,
2998        segments: &[Polynomial<FF>; N],
2999    ) {
3000        let max_allowed_degree = f.degree() / (N as isize);
3001        let all_degrees_are_small_enough =
3002            segments.iter().all(|s| s.degree() <= max_allowed_degree);
3003        assert!(all_degrees_are_small_enough);
3004    }
3005
3006    #[test]
3007    fn split_polynomial_into_segments_of_unequal_size() {
3008        let coefficients: [XFieldElement; 211] = rand::rng().random();
3009        let f = Polynomial::new(coefficients.to_vec());
3010
3011        let segments_2 = Prover::split_polynomial_into_segments::<2, _>(f.clone());
3012        let segments_3 = Prover::split_polynomial_into_segments::<3, _>(f.clone());
3013        let segments_4 = Prover::split_polynomial_into_segments::<4, _>(f.clone());
3014        let segments_7 = Prover::split_polynomial_into_segments::<7, _>(f.clone());
3015
3016        assert_segments_degrees_are_small_enough(&f, &segments_2);
3017        assert_segments_degrees_are_small_enough(&f, &segments_3);
3018        assert_segments_degrees_are_small_enough(&f, &segments_4);
3019        assert_segments_degrees_are_small_enough(&f, &segments_7);
3020
3021        let x = rand::rng().random();
3022        assert_polynomial_equals_recomposed_segments(&f, &segments_2, x);
3023        assert_polynomial_equals_recomposed_segments(&f, &segments_3, x);
3024        assert_polynomial_equals_recomposed_segments(&f, &segments_4, x);
3025        assert_polynomial_equals_recomposed_segments(&f, &segments_7, x);
3026    }
3027
3028    #[test]
3029    fn split_polynomial_into_segments_of_equal_size() {
3030        let coefficients: [BFieldElement; 2 * 3 * 4 * 7] = rand::rng().random();
3031        let f = Polynomial::new(coefficients.to_vec());
3032
3033        let segments_2 = Prover::split_polynomial_into_segments::<2, _>(f.clone());
3034        let segments_3 = Prover::split_polynomial_into_segments::<3, _>(f.clone());
3035        let segments_4 = Prover::split_polynomial_into_segments::<4, _>(f.clone());
3036        let segments_7 = Prover::split_polynomial_into_segments::<7, _>(f.clone());
3037
3038        assert_segments_degrees_are_small_enough(&f, &segments_2);
3039        assert_segments_degrees_are_small_enough(&f, &segments_3);
3040        assert_segments_degrees_are_small_enough(&f, &segments_4);
3041        assert_segments_degrees_are_small_enough(&f, &segments_7);
3042
3043        let x = rand::rng().random();
3044        assert_polynomial_equals_recomposed_segments(&f, &segments_2, x);
3045        assert_polynomial_equals_recomposed_segments(&f, &segments_3, x);
3046        assert_polynomial_equals_recomposed_segments(&f, &segments_4, x);
3047        assert_polynomial_equals_recomposed_segments(&f, &segments_7, x);
3048    }
3049
    /// Randomized inputs for testing `Prover::segmentify`.
    #[derive(Debug, Clone, Eq, PartialEq, Hash, test_strategy::Arbitrary)]
    struct SegmentifyProptestData {
        // log₂ of the trace domain's length
        #[strategy(2_usize..8)]
        log_trace_length: usize,

        // log₂ of the number of cosets; bounded by the trace length and
        // capped at 2⁴ = 16 cosets
        #[strategy(1_usize..=#log_trace_length.min(4))]
        log_num_cosets: usize,

        // log₂ of the FRI domain's expansion factor
        #[strategy(1_usize..6)]
        log_expansion_factor: usize,

        // the polynomial to segmentify; has num_cosets · trace_length
        // coefficients
        #[strategy(vec(arb(), (1 << #log_num_cosets) * (1 << #log_trace_length)))]
        #[map(Polynomial::new)]
        polynomial: Polynomial<'static, XFieldElement>,

        // offset of the FRI domain and of the cosets
        #[strategy(arb())]
        psi: BFieldElement,

        // point for Schwartz-Zippel-style polynomial-equality testing
        #[strategy(arb())]
        random_point: XFieldElement,
    }
3071
    #[proptest]
    fn polynomial_segments_cohere_with_originating_polynomial(test_data: SegmentifyProptestData) {
        /// Segmentify into `N` segments, then check that (1) `N` segment
        /// codewords and polynomials are returned, (2) the segments recombine
        /// to the original polynomial (checked in a random point), and (3) the
        /// returned codewords are the segment polynomials evaluated over the
        /// FRI domain.
        fn segmentify_and_assert_coherence<const N: usize>(
            test_data: &SegmentifyProptestData,
        ) -> TestCaseResult {
            let num_cosets = 1 << test_data.log_num_cosets;
            let trace_length = 1 << test_data.log_trace_length;
            let expansion_factor = 1 << test_data.log_expansion_factor;

            // iota generates the coset offsets: coset i has offset iota^i · psi
            let iota =
                BFieldElement::primitive_root_of_unity((trace_length * num_cosets) as u64).unwrap();
            let trace_domain = ArithmeticDomain::of_length(trace_length)?;
            let fri_domain = ArithmeticDomain::of_length(trace_length * expansion_factor)?
                .with_offset(test_data.psi);

            // column i of the (column-major) matrix holds the polynomial's
            // evaluations over coset i
            let coset_evaluations = (0..u32::try_from(num_cosets)?)
                .flat_map(|i| {
                    let coset = trace_domain.with_offset(iota.mod_pow_u32(i) * test_data.psi);
                    coset.evaluate(&test_data.polynomial)
                })
                .collect();
            let coset_evaluations =
                Array2::from_shape_vec((trace_length, num_cosets).f(), coset_evaluations)?;

            let (actual_segment_codewords, segment_polynomials) =
                Prover::segmentify::<N>(coset_evaluations, test_data.psi, iota, fri_domain);

            prop_assert_eq!(N, actual_segment_codewords.ncols());
            prop_assert_eq!(N, segment_polynomials.len());

            // Schwartz-Zippel: f(r) == Σᵢ rⁱ · fᵢ(rᴺ) for the random point r
            let segments_evaluated = (0..)
                .zip(&segment_polynomials)
                .map(|(segment_index, segment_polynomial)| -> XFieldElement {
                    let point_to_the_seg_idx = test_data.random_point.mod_pow_u32(segment_index);
                    let point_to_the_num_seg = test_data.random_point.mod_pow_u32(N as u32);
                    let segment_in_point_to_the_num_seg =
                        segment_polynomial.evaluate_in_same_field(point_to_the_num_seg);
                    point_to_the_seg_idx * segment_in_point_to_the_num_seg
                })
                .sum::<XFieldElement>();
            let evaluation_in_random_point = test_data
                .polynomial
                .evaluate_in_same_field(test_data.random_point);
            prop_assert_eq!(segments_evaluated, evaluation_in_random_point);

            // the returned codewords must be the segment polynomials'
            // evaluations over the FRI domain, one codeword per column
            let segments_codewords = segment_polynomials
                .iter()
                .flat_map(|polynomial| Array1::from(fri_domain.evaluate(polynomial)))
                .collect_vec();
            let segments_codewords =
                Array2::from_shape_vec((fri_domain.length, N).f(), segments_codewords)?;
            prop_assert_eq!(segments_codewords, actual_segment_codewords);

            Ok(())
        }

        // Exercise every supported segment count the sampled number of cosets
        // allows; the guards mirror `N <= num_cosets`.
        let num_cosets = 1 << test_data.log_num_cosets;
        if num_cosets >= 1 {
            segmentify_and_assert_coherence::<1>(&test_data)?;
        }
        if num_cosets >= 2 {
            segmentify_and_assert_coherence::<2>(&test_data)?;
        }
        if num_cosets >= 4 {
            segmentify_and_assert_coherence::<4>(&test_data)?;
        }
        if num_cosets >= 8 {
            segmentify_and_assert_coherence::<8>(&test_data)?;
        }
        if num_cosets >= 16 {
            segmentify_and_assert_coherence::<16>(&test_data)?;
        }
    }
3145
3146    #[proptest]
3147    fn linear_combination_weights_samples_correct_number_of_elements(
3148        #[strategy(arb())] mut proof_stream: ProofStream,
3149    ) {
3150        let weights = LinearCombinationWeights::sample(&mut proof_stream);
3151
3152        prop_assert_eq!(MasterMainTable::NUM_COLUMNS, weights.main.len());
3153        prop_assert_eq!(MasterAuxTable::NUM_COLUMNS, weights.aux.len());
3154        prop_assert_eq!(NUM_QUOTIENT_SEGMENTS, weights.quot_segments.len());
3155        prop_assert_eq!(NUM_DEEP_CODEWORD_COMPONENTS, weights.deep.len());
3156        prop_assert_eq!(
3157            MasterMainTable::NUM_COLUMNS + MasterAuxTable::NUM_COLUMNS,
3158            weights.main_and_aux().len()
3159        );
3160    }
3161
    /// A program that executes every instruction in the instruction set.
    ///
    /// Requires both public input (a leaf digest) and non-determinism
    /// (divined digests, secret input, and pre-populated RAM); see the setup
    /// at the bottom of this function.
    pub fn program_executing_every_instruction() -> TestableProgram {
        // RAM address at which `merkle_step_mem` finds its sibling digest;
        // written to RAM further below.
        let m_step_mem_addr = 100_000;

        let program = triton_program! {
            // merkle_step using the following fake tree:
            //     ─── 1 ───
            //    ╱         ╲
            //   2           3
            //  ╱  ╲
            // 4    5
            push {m_step_mem_addr}  // _ addr (address for `merkle_step_mem`)
            push 0                  // _ addr 0 (spacer)
            push 5                  // _ addr 0 5 (node index for `merkle_step`s)
            read_io 5               // _ addr 0 5 [digest; 5]
            merkle_step             // _ addr 0 2 [digest; 5]
            merkle_step_mem         // _ addr 0 1 [digest; 5]
            divine 5                // _ addr 0 1 [digest; 5] [digest; 5]
            assert_vector           // _ addr 0 1 [digest; 5]
            pop 5                   // _ addr 0 1
            assert                  // _ addr 0
            pop 2                   // _

            // stack manipulation
            push 1 push 2 push 3    // _  1  2  3
            place 2                 // _  3  1  2
            pick 1                  // _  3  2  1
            swap 2                  // _  1  2  3
            dup 2 assert            // _  1  2  3
            addi -2 assert          // _  1  2
            addi -1 assert          // _  1
            assert                  // _

            // dot_step
            push 0 push 0 push 0    // _ [accumulator; 3]
            push 500                // _ [accumulator; 3] addr_0
            push 800                // _ [accumulator; 3] addr_0 addr_1
            xb_dot_step             // _ [accumulator; 3] addr_0 addr_1
            xx_dot_step             // _ [accumulator; 3] addr_0 addr_1
            write_io 5              // _

            // extension field arithmetic
            push 1 push 2 push 3    // _ [xfe_0; 3]
            push 7 push 8 push 9    // _ [xfe_0; 3] [xfe_1; 3]
            dup 3 dup 3 dup 3       // _ [xfe_0; 3] [xfe_1; 3] [xfe_2; 3]
            xx_add                  // _ [xfe_0; 3] [xfe_1; 3]
            dup 4 dup 4 dup 4       // _ [xfe_0; 3] [xfe_1; 3] [xfe_2; 3]
            xx_mul                  // _ [xfe_0; 3] [xfe_1; 3]
            x_invert                // _ [xfe_0; 3] [xfe_1; 3]
            push 42                 // _ [xfe_0; 3] [xfe_1; 3] 42
            xb_mul                  // _ [xfe_0; 3] [xfe_1; 3]

            // base field arithmetic
            add mul                 // _ bfe_0 bfe_1 bfe_2 bfe_3
            addi 0                  // _ bfe_0 bfe_1 bfe_2 bfe_3
            invert                  // _ bfe_0 bfe_1 bfe_2 bfe_3
            mul add                 // _ bfe_0 bfe_1
            eq                      // _ bfe_0
            pop 1                   // _

            // bit-wise arithmetic
            push 38                 // _ 38
            push 2                  // _ 38 2
            pow                     // _ big_num
            push 1337               // _ big_num 1337
            add                     // _ big_num
            split                   // _ u32_0 u32_1
            dup 1 dup 1 lt pop 1    // _ u32_0 u32_1
            dup 1 and               // _ u32_0 u32_1
            dup 1 xor               // _ u32_0 u32_1
            push 9                  // _ u32_0 u32_1 9
            log_2_floor pop 1       // _ u32_0 u32_1
            div_mod                 // _ u32_0 u32_1
            pop_count               // _ u32_0 u32_1
            pop 2                   // _

            // Sponge
            sponge_init             // _
            divine 5 divine 5       // _ [stuff; 10]
            sponge_absorb           // _
            push 42                 // _ 42
            sponge_absorb_mem       // _ 52
            pop 1                   // _
            sponge_squeeze          // _ [stuff; 10]
            hash                    // _ [stuff; 5]
            pop 5                   // _

            // RAM
            push 300                // _ address
            read_mem 5              // _ [stuff; 5] address
            swap 6                  // _ [stuff; 5] address
            write_mem 5             // _ address
            pop 1                   // _

            // control flow
            push 0 skiz nop         // _
            push 1 skiz nop         // _
            push 0 push 2           // _ 0 2
            push 0 push 0 push 0    // _ 0 2 [0; 3]
            push 0 push 0           // _ 0 2 [0; 5]
            call rec_or_ret         // _ 0 0 [0; 5]
            pop 5 pop 2             // _
            push 2                  // _ 2
            call rec                // _ 0
            pop 1
            halt

            // BEFORE: _ n
            // AFTER:  _ 0
            rec:
                dup 0 push 0 eq     // _ n n==0
                skiz return         // _ n
                push -1 add         // _ n-1
                recurse             // _ n-1

            // BEFORE: _ m n [_; 5]
            // AFTER:  _ m m [_; 5]
            rec_or_ret:
                swap 5              // _ m [_; 5] n
                push -1 add         // _ m [_; 5] n-1
                swap 5              // _ m n-1 [_; 5]
                recurse_or_return   // _ m n-1 [_; 5]
        };

        // The fake Merkle tree sketched at the top of the program: leaves 4
        // and 5 hash to node 2; nodes 2 and 3 hash to root 1.
        let tree_node_5 = Digest::new(bfe_array![5; 5]);
        let tree_node_4 = Digest::new(bfe_array![4; 5]);
        let tree_node_3 = Digest::new(bfe_array![3; 5]);
        let tree_node_2 = Tip5::hash_pair(tree_node_4, tree_node_5);
        let tree_node_1 = Tip5::hash_pair(tree_node_2, tree_node_3);

        // Public input: the leaf digest consumed by `read_io 5`.
        let public_input = tree_node_5.values().to_vec();

        // Secret input: the root digest (reversed — NOTE(review): presumably
        // to match `divine 5`'s push order, verify) followed by 10 elements
        // for the `divine 5 divine 5` preceding `sponge_absorb`.
        let secret_input = [tree_node_1.reversed().values().to_vec(), bfe_vec![1337; 10]].concat();
        // RAM addresses 0 through 999 hold arbitrary values; `read_mem`,
        // `sponge_absorb_mem`, `xb_dot_step`, and `xx_dot_step` read from
        // this region.
        let mut ram = (0..)
            .zip(42..)
            .take(1_000)
            .map(|(l, r)| (bfe!(l), bfe!(r)))
            .collect::<HashMap<_, _>>();
        // Write node 3's digest to RAM for `merkle_step_mem`.
        for (address, digest_element) in (m_step_mem_addr..).zip(tree_node_3.values()) {
            ram.insert(bfe!(address), digest_element);
        }
        let non_determinism = NonDeterminism::new(secret_input)
            .with_digests([tree_node_4]) // sibling digest for `merkle_step`
            .with_ram(ram);

        TestableProgram::new(program)
            .with_input(public_input)
            .with_non_determinism(non_determinism)
    }
3311
3312    #[test]
3313    fn program_executing_every_instruction_actually_executes_every_instruction() {
3314        let TestableProgram {
3315            program,
3316            public_input,
3317            non_determinism,
3318            ..
3319        } = program_executing_every_instruction();
3320        let (aet, _) = VM::trace_execution(program, public_input, non_determinism).unwrap();
3321        let opcodes_of_all_executed_instructions = aet
3322            .processor_trace
3323            .column(ProcessorMainColumn::CI.main_index())
3324            .iter()
3325            .copied()
3326            .collect::<HashSet<_>>();
3327
3328        let all_opcodes = Instruction::iter()
3329            .map(|instruction| instruction.opcode_b())
3330            .collect::<HashSet<_>>();
3331
3332        all_opcodes
3333            .difference(&opcodes_of_all_executed_instructions)
3334            .for_each(|&opcode| {
3335                let instruction = Instruction::try_from(opcode).unwrap();
3336                eprintln!("Instruction {instruction} was not executed.");
3337            });
3338        assert_eq!(all_opcodes, opcodes_of_all_executed_instructions);
3339    }
3340
3341    #[test]
3342    fn constraints_evaluate_to_zero_on_program_executing_every_instruction() -> ConstraintResult {
3343        triton_constraints_evaluate_to_zero(program_executing_every_instruction())
3344    }
3345}