sp1_recursion_core/chips/
fri_fold.rs

#![allow(clippy::needless_range_loop)]

use core::borrow::Borrow;
use itertools::Itertools;
use p3_baby_bear::BabyBear;
use sp1_core_machine::utils::{next_power_of_two, pad_rows_fixed};
use sp1_stark::air::{BinomialExtension, MachineAir};
use std::borrow::BorrowMut;
use tracing::instrument;

use p3_air::{Air, AirBuilder, BaseAir, PairBuilder};
use p3_field::{AbstractField, PrimeField32};
use p3_matrix::{dense::RowMajorMatrix, Matrix};
use sp1_stark::air::{BaseAirBuilder, ExtensionAirBuilder};

use sp1_derive::AlignedBorrow;

use crate::{
    air::Block, builder::SP1RecursionAirBuilder, runtime::Instruction, ExecutionRecord,
    FriFoldEvent, FriFoldInstr,
};

use super::mem::MemoryAccessColsChips;

pub const NUM_FRI_FOLD_COLS: usize = core::mem::size_of::<FriFoldCols<u8>>();
pub const NUM_FRI_FOLD_PREPROCESSED_COLS: usize =
    core::mem::size_of::<FriFoldPreprocessedCols<u8>>();

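/// A chip that verifies FRI fold invocations.
///
/// Informally, each row of an invocation applies the per-element update below (this is a
/// summary of the constraints in `eval_fri_fold` and of the reference computation in the
/// tests at the bottom of this file):
///
/// ```text
/// alpha_pow_out = alpha_pow_in * alpha
/// ro_out        = ro_in + alpha_pow_in * (p_at_x - p_at_z) / (x - z)
/// ```
///
/// where `x` is a base-field element and the remaining values are degree-4 extension-field
/// elements stored as `Block`s.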
pub struct FriFoldChip<const DEGREE: usize> {
    pub fixed_log2_rows: Option<usize>,
    pub pad: bool,
}

impl<const DEGREE: usize> Default for FriFoldChip<DEGREE> {
    fn default() -> Self {
        Self { fixed_log2_rows: None, pad: true }
    }
}

/// The preprocessed columns for a FRI fold invocation.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct FriFoldPreprocessedCols<T: Copy> {
    pub is_first: T,

    // Memory accesses for the single fields.
    pub z_mem: MemoryAccessColsChips<T>,
    pub alpha_mem: MemoryAccessColsChips<T>,
    pub x_mem: MemoryAccessColsChips<T>,

    // Memory accesses for the vector field inputs.
    pub alpha_pow_input_mem: MemoryAccessColsChips<T>,
    pub ro_input_mem: MemoryAccessColsChips<T>,
    pub p_at_x_mem: MemoryAccessColsChips<T>,
    pub p_at_z_mem: MemoryAccessColsChips<T>,

    // Memory accesses for the vector field outputs.
    pub ro_output_mem: MemoryAccessColsChips<T>,
    pub alpha_pow_output_mem: MemoryAccessColsChips<T>,

    pub is_real: T,
}

#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct FriFoldCols<T: Copy> {
    pub z: Block<T>,
    pub alpha: Block<T>,
    pub x: T,

    pub p_at_x: Block<T>,
    pub p_at_z: Block<T>,
    pub alpha_pow_input: Block<T>,
    pub ro_input: Block<T>,

    pub alpha_pow_output: Block<T>,
    pub ro_output: Block<T>,
}

impl<F, const DEGREE: usize> BaseAir<F> for FriFoldChip<DEGREE> {
    fn width(&self) -> usize {
        NUM_FRI_FOLD_COLS
    }
}

impl<F: PrimeField32, const DEGREE: usize> MachineAir<F> for FriFoldChip<DEGREE> {
    type Record = ExecutionRecord<F>;

    type Program = crate::RecursionProgram<F>;

    fn name(&self) -> String {
        "FriFold".to_string()
    }

    fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) {
        // This is a no-op.
    }

    fn preprocessed_width(&self) -> usize {
        NUM_FRI_FOLD_PREPROCESSED_COLS
    }

    fn generate_preprocessed_trace(&self, program: &Self::Program) -> Option<RowMajorMatrix<F>> {
        assert_eq!(
            std::any::TypeId::of::<F>(),
            std::any::TypeId::of::<BabyBear>(),
            "generate_preprocessed_trace only supports BabyBear field"
        );

        let mut rows: Vec<[BabyBear; NUM_FRI_FOLD_PREPROCESSED_COLS]> = Vec::new();
        program
            .inner
            .iter()
            .filter_map(|instruction| match instruction {
                Instruction::FriFold(instr) => Some(unsafe {
                    std::mem::transmute::<&Box<FriFoldInstr<F>>, &Box<FriFoldInstr<BabyBear>>>(
                        instr,
                    )
                }),
                _ => None,
            })
            .for_each(|instruction| {
                let mut row_add = vec![
                    [BabyBear::zero(); NUM_FRI_FOLD_PREPROCESSED_COLS];
                    instruction.ext_vec_addrs.ps_at_z.len()
                ];

                row_add.iter_mut().enumerate().for_each(|(row_idx, row)| {
                    let cols: &mut FriFoldPreprocessedCols<BabyBear> =
                        row.as_mut_slice().borrow_mut();
                    unsafe {
                        crate::sys::fri_fold_instr_to_row_babybear(
                            &instruction.into(),
                            row_idx,
                            cols,
                        );
                    }
                });

                rows.extend(row_add);
            });

        // Pad the trace to a power of two.
        if self.pad {
            pad_rows_fixed(
                &mut rows,
                || [BabyBear::zero(); NUM_FRI_FOLD_PREPROCESSED_COLS],
                self.fixed_log2_rows,
            );
        }

        let trace = RowMajorMatrix::new(
            unsafe {
                std::mem::transmute::<Vec<BabyBear>, Vec<F>>(
                    rows.into_iter().flatten().collect::<Vec<BabyBear>>(),
                )
            },
            NUM_FRI_FOLD_PREPROCESSED_COLS,
        );
        Some(trace)
    }

    fn num_rows(&self, input: &Self::Record) -> Option<usize> {
        let events = &input.fri_fold_events;
        Some(next_power_of_two(events.len(), input.fixed_log2_rows(self)))
    }

    #[instrument(name = "generate fri fold trace", level = "debug", skip_all, fields(rows = input.fri_fold_events.len()))]
    fn generate_trace(
        &self,
        input: &ExecutionRecord<F>,
        _: &mut ExecutionRecord<F>,
    ) -> RowMajorMatrix<F> {
        assert_eq!(
            std::any::TypeId::of::<F>(),
            std::any::TypeId::of::<BabyBear>(),
            "generate_trace only supports BabyBear field"
        );

        let events = unsafe {
            std::mem::transmute::<&Vec<FriFoldEvent<F>>, &Vec<FriFoldEvent<BabyBear>>>(
                &input.fri_fold_events,
            )
        };
        let mut rows = events
            .iter()
            .map(|event| {
                let mut row = [BabyBear::zero(); NUM_FRI_FOLD_COLS];
                let cols: &mut FriFoldCols<BabyBear> = row.as_mut_slice().borrow_mut();
                unsafe {
                    crate::sys::fri_fold_event_to_row_babybear(event, cols);
                }

                row
            })
            .collect_vec();

        // Pad the trace to a power of two.
        if self.pad {
            rows.resize(self.num_rows(input).unwrap(), [BabyBear::zero(); NUM_FRI_FOLD_COLS]);
        }

        // Convert the trace to a row major matrix.
        let trace = RowMajorMatrix::new(
            unsafe {
                std::mem::transmute::<Vec<BabyBear>, Vec<F>>(
                    rows.into_iter().flatten().collect::<Vec<BabyBear>>(),
                )
            },
            NUM_FRI_FOLD_COLS,
        );

        #[cfg(debug_assertions)]
        eprintln!(
            "fri fold trace dims: width = {:?}, height = {:?}",
            trace.width(),
            trace.height()
        );

        trace
    }

    fn included(&self, _record: &Self::Record) -> bool {
        true
    }
}

impl<const DEGREE: usize> FriFoldChip<DEGREE> {
    pub fn eval_fri_fold<AB: SP1RecursionAirBuilder>(
        &self,
        builder: &mut AB,
        local: &FriFoldCols<AB::Var>,
        next: &FriFoldCols<AB::Var>,
        local_prepr: &FriFoldPreprocessedCols<AB::Var>,
        next_prepr: &FriFoldPreprocessedCols<AB::Var>,
    ) {
        // Constrain mem read for x.  Read at the first fri fold row.
        builder.send_single(local_prepr.x_mem.addr, local.x, local_prepr.x_mem.mult);

        // Ensure that the x value is the same for all rows within a fri fold invocation.
        builder
            .when_transition()
            .when(next_prepr.is_real)
            .when_not(next_prepr.is_first)
            .assert_eq(local.x, next.x);

        // Constrain mem read for z.  Read at the first fri fold row.
        builder.send_block(local_prepr.z_mem.addr, local.z, local_prepr.z_mem.mult);

        // Ensure that the z value is the same for all rows within a fri fold invocation.
        builder
            .when_transition()
            .when(next_prepr.is_real)
            .when_not(next_prepr.is_first)
            .assert_ext_eq(local.z.as_extension::<AB>(), next.z.as_extension::<AB>());

        // Constrain mem read for alpha.  Read at the first fri fold row.
        builder.send_block(local_prepr.alpha_mem.addr, local.alpha, local_prepr.alpha_mem.mult);

        // Ensure that the alpha value is the same for all rows within a fri fold invocation.
        builder
            .when_transition()
            .when(next_prepr.is_real)
            .when_not(next_prepr.is_first)
            .assert_ext_eq(local.alpha.as_extension::<AB>(), next.alpha.as_extension::<AB>());

        // Constrain read for alpha_pow_input.
        builder.send_block(
            local_prepr.alpha_pow_input_mem.addr,
            local.alpha_pow_input,
            local_prepr.alpha_pow_input_mem.mult,
        );

        // Constrain read for ro_input.
        builder.send_block(
            local_prepr.ro_input_mem.addr,
            local.ro_input,
            local_prepr.ro_input_mem.mult,
        );

        // Constrain read for p_at_z.
        builder.send_block(local_prepr.p_at_z_mem.addr, local.p_at_z, local_prepr.p_at_z_mem.mult);

        // Constrain read for p_at_x.
        builder.send_block(local_prepr.p_at_x_mem.addr, local.p_at_x, local_prepr.p_at_x_mem.mult);

        // Constrain write for alpha_pow_output.
        builder.send_block(
            local_prepr.alpha_pow_output_mem.addr,
            local.alpha_pow_output,
            local_prepr.alpha_pow_output_mem.mult,
        );

        // Constrain write for ro_output.
        builder.send_block(
            local_prepr.ro_output_mem.addr,
            local.ro_output,
            local_prepr.ro_output_mem.mult,
        );

        // 1. Constrain new_alpha_pow = old_alpha_pow * alpha.
        let alpha = local.alpha.as_extension::<AB>();
        let old_alpha_pow = local.alpha_pow_input.as_extension::<AB>();
        let new_alpha_pow = local.alpha_pow_output.as_extension::<AB>();
        builder.assert_ext_eq(old_alpha_pow.clone() * alpha, new_alpha_pow.clone());

        // 2. Constrain new_ro = old_ro + old_alpha_pow * quotient,
        // where quotient = (p_at_x - p_at_z) / (x - z)
        // <=> (new_ro - old_ro) * (x - z) = old_alpha_pow * (p_at_x - p_at_z)
        let p_at_z = local.p_at_z.as_extension::<AB>();
        let p_at_x = local.p_at_x.as_extension::<AB>();
        let z = local.z.as_extension::<AB>();
        let x = local.x.into();
        let old_ro = local.ro_input.as_extension::<AB>();
        let new_ro = local.ro_output.as_extension::<AB>();
        builder.assert_ext_eq(
            (new_ro.clone() - old_ro) * (BinomialExtension::from_base(x) - z),
            (p_at_x - p_at_z) * old_alpha_pow,
        );
    }

    pub const fn do_memory_access<T: Copy>(local: &FriFoldPreprocessedCols<T>) -> T {
        local.is_real
    }
}

impl<AB, const DEGREE: usize> Air<AB> for FriFoldChip<DEGREE>
where
    AB: SP1RecursionAirBuilder + PairBuilder,
{
    fn eval(&self, builder: &mut AB) {
        let main = builder.main();
        let (local, next) = (main.row_slice(0), main.row_slice(1));
        let local: &FriFoldCols<AB::Var> = (*local).borrow();
        let next: &FriFoldCols<AB::Var> = (*next).borrow();
        let prepr = builder.preprocessed();
        let (prepr_local, prepr_next) = (prepr.row_slice(0), prepr.row_slice(1));
        let prepr_local: &FriFoldPreprocessedCols<AB::Var> = (*prepr_local).borrow();
        let prepr_next: &FriFoldPreprocessedCols<AB::Var> = (*prepr_next).borrow();

        // Dummy constraints to normalize to DEGREE.
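        // (Presumably this pins the chip's maximum constraint degree at `DEGREE` so the
        // quotient-degree bookkeeping stays uniform regardless of the real constraints'
        // degrees; the equation below is trivially satisfied and has no other effect.)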
        let lhs = (0..DEGREE).map(|_| prepr_local.is_real.into()).product::<AB::Expr>();
        let rhs = (0..DEGREE).map(|_| prepr_local.is_real.into()).product::<AB::Expr>();
        builder.assert_eq(lhs, rhs);

        self.eval_fri_fold::<AB>(builder, local, next, prepr_local, prepr_next);
    }
}

#[cfg(test)]
mod tests {
    #![allow(clippy::print_stdout)]

    use crate::{
        air::Block,
        chips::{fri_fold::FriFoldChip, mem::MemoryAccessCols, test_fixtures},
        machine::tests::test_recursion_linear_program,
        runtime::{instruction as instr, ExecutionRecord},
        stark::BabyBearPoseidon2Outer,
        FriFoldBaseIo, FriFoldEvent, FriFoldExtSingleIo, FriFoldExtVecIo, Instruction,
        MemAccessKind, RecursionProgram,
    };
    use p3_baby_bear::BabyBear;
    use p3_field::{AbstractExtensionField, AbstractField};
    use p3_matrix::dense::RowMajorMatrix;
    use rand::{rngs::StdRng, Rng, SeedableRng};
    use sp1_core_machine::utils::setup_logger;
    use sp1_stark::{air::MachineAir, StarkGenericConfig};
    use std::mem::size_of;

    use super::*;

    const DEGREE: usize = 3;

    #[test]
    fn prove_babybear_circuit_fri_fold() {
        setup_logger();
        type SC = BabyBearPoseidon2Outer;
        type F = <SC as StarkGenericConfig>::Val;
        type EF = <SC as StarkGenericConfig>::Challenge;

        let mut rng = StdRng::seed_from_u64(0xDEADBEEF);
        let mut random_felt = move || -> F { F::from_canonical_u32(rng.gen_range(0..1 << 16)) };
        let mut rng = StdRng::seed_from_u64(0xDEADBEEF);
        let mut random_block =
            move || Block::from([F::from_canonical_u32(rng.gen_range(0..1 << 16)); 4]);
        let mut addr = 0;

        let num_ext_vecs: u32 = size_of::<FriFoldExtVecIo<u8>>() as u32;
        let num_singles: u32 =
            size_of::<FriFoldBaseIo<u8>>() as u32 + size_of::<FriFoldExtSingleIo<u8>>() as u32;

        let instructions = (2..17)
            .flat_map(|i: u32| {
                let alloc_size = i * (num_ext_vecs + 2) + num_singles;

                // Allocate the memory for a FRI fold instruction. Here, i is the length
                // of the vectors for the vector fields of the instruction.
                let mat_opening_a = (0..i).map(|x| x + addr).collect::<Vec<_>>();
                let ps_at_z_a = (0..i).map(|x| x + i + addr).collect::<Vec<_>>();

                let alpha_pow_input_a = (0..i).map(|x: u32| x + addr + 2 * i).collect::<Vec<_>>();
                let ro_input_a = (0..i).map(|x: u32| x + addr + 3 * i).collect::<Vec<_>>();

                let alpha_pow_output_a = (0..i).map(|x: u32| x + addr + 4 * i).collect::<Vec<_>>();
                let ro_output_a = (0..i).map(|x: u32| x + addr + 5 * i).collect::<Vec<_>>();

                let x_a = addr + 6 * i;
                let z_a = addr + 6 * i + 1;
                let alpha_a = addr + 6 * i + 2;

                addr += alloc_size;

                // Generate random values for the inputs.
                let x = random_felt();
                let z = random_block();
                let alpha = random_block();

                let alpha_pow_input = (0..i).map(|_| random_block()).collect::<Vec<_>>();
                let ro_input = (0..i).map(|_| random_block()).collect::<Vec<_>>();

                let ps_at_z = (0..i).map(|_| random_block()).collect::<Vec<_>>();
                let mat_opening = (0..i).map(|_| random_block()).collect::<Vec<_>>();

                // Compute the outputs from the inputs.
                let alpha_pow_output = (0..i)
                    .map(|i| alpha_pow_input[i as usize].ext::<EF>() * alpha.ext::<EF>())
                    .collect::<Vec<EF>>();
                let ro_output = (0..i)
                    .map(|i| {
                        let i = i as usize;
                        ro_input[i].ext::<EF>() +
                            alpha_pow_input[i].ext::<EF>() *
                                (-ps_at_z[i].ext::<EF>() + mat_opening[i].ext::<EF>()) /
                                (-z.ext::<EF>() + x)
                    })
                    .collect::<Vec<EF>>();

                // Write the inputs to memory.
                let mut instructions = vec![instr::mem_single(MemAccessKind::Write, 1, x_a, x)];

                instructions.push(instr::mem_block(MemAccessKind::Write, 1, z_a, z));

                instructions.push(instr::mem_block(MemAccessKind::Write, 1, alpha_a, alpha));

                (0..i).for_each(|j_32| {
                    let j = j_32 as usize;
                    instructions.push(instr::mem_block(
                        MemAccessKind::Write,
                        1,
                        mat_opening_a[j],
                        mat_opening[j],
                    ));
                    instructions.push(instr::mem_block(
                        MemAccessKind::Write,
                        1,
                        ps_at_z_a[j],
                        ps_at_z[j],
                    ));

                    instructions.push(instr::mem_block(
                        MemAccessKind::Write,
                        1,
                        alpha_pow_input_a[j],
                        alpha_pow_input[j],
                    ));
                    instructions.push(instr::mem_block(
                        MemAccessKind::Write,
                        1,
                        ro_input_a[j],
                        ro_input[j],
                    ));
                });

                // Generate the FRI fold instruction.
                instructions.push(instr::fri_fold(
                    z_a,
                    alpha_a,
                    x_a,
                    mat_opening_a.clone(),
                    ps_at_z_a.clone(),
                    alpha_pow_input_a.clone(),
                    ro_input_a.clone(),
                    alpha_pow_output_a.clone(),
                    ro_output_a.clone(),
                    vec![1; i as usize],
                    vec![1; i as usize],
                ));

                // Read all the outputs.
                (0..i).for_each(|j| {
                    let j = j as usize;
                    instructions.push(instr::mem_block(
                        MemAccessKind::Read,
                        1,
                        alpha_pow_output_a[j],
                        Block::from(alpha_pow_output[j].as_base_slice()),
                    ));
                    instructions.push(instr::mem_block(
                        MemAccessKind::Read,
                        1,
                        ro_output_a[j],
                        Block::from(ro_output[j].as_base_slice()),
                    ));
                });

                instructions
            })
            .collect::<Vec<Instruction<F>>>();

        test_recursion_linear_program(instructions);
    }
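
    // A small algebra sanity check (a sketch, not a constraint-level test): it verifies that
    // the quotient form of the fold update used above,
    //     ro_out = ro_in + alpha_pow * (p_at_x - p_at_z) / (x - z),
    // agrees with the multiplied-out form enforced in `eval_fri_fold`,
    //     (ro_out - ro_in) * (x - z) = alpha_pow * (p_at_x - p_at_z),
    // for random inputs generated with the same helpers as the test above.
    #[test]
    fn fri_fold_update_identity() {
        type SC = BabyBearPoseidon2Outer;
        type F = <SC as StarkGenericConfig>::Val;
        type EF = <SC as StarkGenericConfig>::Challenge;

        let mut rng = StdRng::seed_from_u64(0xDEADBEEF);
        let mut random_ext = move || -> EF {
            Block::from([F::from_canonical_u32(rng.gen_range(0..1 << 16)); 4]).ext::<EF>()
        };

        let mut rng = StdRng::seed_from_u64(0xCAFE);
        // Draw x from [1, 2^16) so that x - z is nonzero (z's limbs are all equal).
        let x: F = F::from_canonical_u32(rng.gen_range(1..1 << 16));
        let (z, alpha_pow, p_at_x, p_at_z, ro_in) =
            (random_ext(), random_ext(), random_ext(), random_ext(), random_ext());

        // Quotient form of the update.
        let ro_out = ro_in + alpha_pow * (p_at_x - p_at_z) / (-z + x);

        // Multiplied-out form, as constrained by the chip.
        assert_eq!((ro_out - ro_in) * (-z + x), alpha_pow * (p_at_x - p_at_z));
    }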

    #[test]
    fn generate_fri_fold_circuit_trace() {
        type F = BabyBear;

        let mut rng = StdRng::seed_from_u64(0xDEADBEEF);
        let mut rng2 = StdRng::seed_from_u64(0xDEADBEEF);
        let mut random_felt = move || -> F { F::from_canonical_u32(rng.gen_range(0..1 << 16)) };
        let mut random_block = move || Block::from([random_felt(); 4]);

        let shard = ExecutionRecord {
            fri_fold_events: (0..17)
                .map(|_| FriFoldEvent {
                    base_single: FriFoldBaseIo {
                        x: F::from_canonical_u32(rng2.gen_range(0..1 << 16)),
                    },
                    ext_single: FriFoldExtSingleIo { z: random_block(), alpha: random_block() },
                    ext_vec: crate::FriFoldExtVecIo {
                        mat_opening: random_block(),
                        ps_at_z: random_block(),
                        alpha_pow_input: random_block(),
                        ro_input: random_block(),
                        alpha_pow_output: random_block(),
                        ro_output: random_block(),
                    },
                })
                .collect(),
            ..Default::default()
        };
        let chip = FriFoldChip::<3>::default();
        let trace: RowMajorMatrix<F> = chip.generate_trace(&shard, &mut ExecutionRecord::default());
        println!("{:?}", trace.values)
    }

    fn generate_trace_reference<const DEGREE: usize>(
        input: &ExecutionRecord<BabyBear>,
        _: &mut ExecutionRecord<BabyBear>,
    ) -> RowMajorMatrix<BabyBear> {
        type F = BabyBear;

        let mut rows = input
            .fri_fold_events
            .iter()
            .map(|event| {
                let mut row = [F::zero(); NUM_FRI_FOLD_COLS];

                let cols: &mut FriFoldCols<F> = row.as_mut_slice().borrow_mut();

                cols.x = event.base_single.x;
                cols.z = event.ext_single.z;
                cols.alpha = event.ext_single.alpha;

                cols.p_at_z = event.ext_vec.ps_at_z;
                cols.p_at_x = event.ext_vec.mat_opening;
                cols.alpha_pow_input = event.ext_vec.alpha_pow_input;
                cols.ro_input = event.ext_vec.ro_input;

                cols.alpha_pow_output = event.ext_vec.alpha_pow_output;
                cols.ro_output = event.ext_vec.ro_output;

                row
            })
            .collect_vec();

        rows.resize(
            FriFoldChip::<DEGREE>::default().num_rows(input).unwrap(),
            [F::zero(); NUM_FRI_FOLD_COLS],
        );

        RowMajorMatrix::new(rows.into_iter().flatten().collect(), NUM_FRI_FOLD_COLS)
    }

    #[test]
    fn test_generate_trace() {
        let shard = test_fixtures::shard();
        let mut execution_record = test_fixtures::default_execution_record();
        let chip = FriFoldChip::<DEGREE>::default();
        let trace = chip.generate_trace(&shard, &mut execution_record);
        assert!(trace.height() >= test_fixtures::MIN_TEST_CASES);

        assert_eq!(trace, generate_trace_reference::<DEGREE>(&shard, &mut execution_record));
    }

    fn generate_preprocessed_trace_reference<const DEGREE: usize>(
        program: &RecursionProgram<BabyBear>,
    ) -> RowMajorMatrix<BabyBear> {
        type F = BabyBear;

        let mut rows: Vec<[F; NUM_FRI_FOLD_PREPROCESSED_COLS]> = Vec::new();
        program
            .inner
            .iter()
            .filter_map(|instruction| match instruction {
                Instruction::FriFold(instr) => Some(instr),
                _ => None,
            })
            .for_each(|instruction| {
                let FriFoldInstr {
                    base_single_addrs,
                    ext_single_addrs,
                    ext_vec_addrs,
                    alpha_pow_mults,
                    ro_mults,
                } = instruction.as_ref();
                let mut row_add =
                    vec![[F::zero(); NUM_FRI_FOLD_PREPROCESSED_COLS]; ext_vec_addrs.ps_at_z.len()];

                row_add.iter_mut().enumerate().for_each(|(i, row)| {
                    let row: &mut FriFoldPreprocessedCols<F> = row.as_mut_slice().borrow_mut();
                    row.is_first = F::from_bool(i == 0);

                    // Only need to read z, x, and alpha on the first iteration, hence the
                    // multiplicities are i==0.
                    row.z_mem =
                        MemoryAccessCols { addr: ext_single_addrs.z, mult: -F::from_bool(i == 0) };
                    row.x_mem =
                        MemoryAccessCols { addr: base_single_addrs.x, mult: -F::from_bool(i == 0) };
                    row.alpha_mem = MemoryAccessCols {
                        addr: ext_single_addrs.alpha,
                        mult: -F::from_bool(i == 0),
                    };

                    // Read the memory for the input vectors.
                    row.alpha_pow_input_mem = MemoryAccessCols {
                        addr: ext_vec_addrs.alpha_pow_input[i],
                        mult: F::neg_one(),
                    };
                    row.ro_input_mem =
                        MemoryAccessCols { addr: ext_vec_addrs.ro_input[i], mult: F::neg_one() };
                    row.p_at_z_mem =
                        MemoryAccessCols { addr: ext_vec_addrs.ps_at_z[i], mult: F::neg_one() };
                    row.p_at_x_mem =
                        MemoryAccessCols { addr: ext_vec_addrs.mat_opening[i], mult: F::neg_one() };

                    // Write the memory for the output vectors.
                    row.alpha_pow_output_mem = MemoryAccessCols {
                        addr: ext_vec_addrs.alpha_pow_output[i],
                        mult: alpha_pow_mults[i],
                    };
                    row.ro_output_mem =
                        MemoryAccessCols { addr: ext_vec_addrs.ro_output[i], mult: ro_mults[i] };

                    row.is_real = F::one();
                });
                rows.extend(row_add);
            });

        pad_rows_fixed(&mut rows, || [F::zero(); NUM_FRI_FOLD_PREPROCESSED_COLS], None);

        RowMajorMatrix::new(rows.into_iter().flatten().collect(), NUM_FRI_FOLD_PREPROCESSED_COLS)
    }

    #[test]
    #[ignore = "Failing due to merge conflicts. Will be fixed shortly."]
    fn generate_preprocessed_trace() {
        let program = test_fixtures::program();
        let chip = FriFoldChip::<DEGREE>::default();
        let trace = chip.generate_preprocessed_trace(&program).unwrap();
        assert!(trace.height() >= test_fixtures::MIN_TEST_CASES);

        assert_eq!(trace, generate_preprocessed_trace_reference::<DEGREE>(&program));
    }
}