// sp1_recursion_machine/chips/alu_ext.rs
1use core::borrow::Borrow;
2use slop_air::{Air, BaseAir, PairBuilder};
3use slop_algebra::{extension::BinomiallyExtendable, Field, PrimeField32};
4use slop_matrix::Matrix;
5use slop_maybe_rayon::prelude::{IndexedParallelIterator, ParallelIterator, ParallelSliceMut};
6use sp1_derive::AlignedBorrow;
7use sp1_hypercube::{
8    air::{ExtensionAirBuilder, MachineAir},
9    next_multiple_of_32,
10};
11use sp1_primitives::SP1Field;
12use sp1_recursion_executor::{
13    Address, Block, ExecutionRecord, ExtAluInstr, ExtAluIo, ExtAluOpcode, Instruction,
14    RecursionProgram, D,
15};
16use std::{borrow::BorrowMut, iter::zip, mem::MaybeUninit};
17
18use crate::builder::SP1RecursionAirBuilder;
19
/// Number of ExtAlu operations packed into a single trace row.
pub const NUM_EXT_ALU_ENTRIES_PER_ROW: usize = 1;

/// Chip for extension-field ALU operations (add/sub/mul/div over the
/// degree-`D` binomial extension of the base field).
#[derive(Default, Clone)]
pub struct ExtAluChip;

/// Width of the main trace in field elements, derived from the column struct layout.
pub const NUM_EXT_ALU_COLS: usize = core::mem::size_of::<ExtAluCols<u8>>();

/// Main-trace columns: one [`ExtAluValueCols`] group per packed entry.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct ExtAluCols<F: Copy> {
    pub values: [ExtAluValueCols<F>; NUM_EXT_ALU_ENTRIES_PER_ROW],
}
/// Width of one value-column group in field elements.
const NUM_EXT_ALU_VALUE_COLS: usize = core::mem::size_of::<ExtAluValueCols<u8>>();

/// The operand/result extension-field values (`in1`, `in2`, `out`) for a single
/// ExtAlu operation, stored as `Block<F>` limbs.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct ExtAluValueCols<F: Copy> {
    pub vals: ExtAluIo<Block<F>>,
}
39
/// Width of the preprocessed trace in field elements.
pub const NUM_EXT_ALU_PREPROCESSED_COLS: usize = core::mem::size_of::<ExtAluPreprocessedCols<u8>>();

/// Preprocessed-trace columns: one [`ExtAluAccessCols`] group per packed entry.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct ExtAluPreprocessedCols<F: Copy> {
    pub accesses: [ExtAluAccessCols<F>; NUM_EXT_ALU_ENTRIES_PER_ROW],
}
47
/// Width of one access-column group in field elements.
pub const NUM_EXT_ALU_ACCESS_COLS: usize = core::mem::size_of::<ExtAluAccessCols<u8>>();

/// Per-operation preprocessed data: the memory addresses of the operands and
/// result, a one-hot opcode selector, and the output's multiplicity.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct ExtAluAccessCols<F: Copy> {
    /// Addresses of `in1`, `in2`, and `out`.
    pub addrs: ExtAluIo<Address<F>>,
    // Opcode selector flags; trace generation sets exactly one of these per real row.
    pub is_add: F,
    pub is_sub: F,
    pub is_mul: F,
    pub is_div: F,
    /// Multiplicity with which the output is sent to memory.
    pub mult: F,
}
60
impl<F: Field> BaseAir<F> for ExtAluChip {
    /// The main trace width is fixed by the column struct layout.
    fn width(&self) -> usize {
        NUM_EXT_ALU_COLS
    }
}
66
impl<F: PrimeField32 + BinomiallyExtendable<D>> MachineAir<F> for ExtAluChip {
    type Record = ExecutionRecord<F>;

    type Program = RecursionProgram<F>;

    fn name(&self) -> &'static str {
        "ExtAlu"
    }

    fn preprocessed_width(&self) -> usize {
        NUM_EXT_ALU_PREPROCESSED_COLS
    }

    /// Counts the `ExtAlu` instructions in the program and delegates to
    /// [`Self::preprocessed_num_rows_with_instrs_len`] for the padded row count.
    fn preprocessed_num_rows(&self, program: &Self::Program) -> Option<usize> {
        let instrs_len = program
            .inner
            .iter()
            .filter_map(|instruction| match instruction.inner() {
                Instruction::ExtAlu(x) => Some(x),
                _ => None,
            })
            .count();
        self.preprocessed_num_rows_with_instrs_len(program, instrs_len)
    }

    /// Padded preprocessed height for `instrs_len` instructions: entries are packed
    /// `NUM_EXT_ALU_ENTRIES_PER_ROW` per row, then rounded by `next_multiple_of_32`
    /// (taking into account the program shape's fixed height, when present).
    fn preprocessed_num_rows_with_instrs_len(
        &self,
        program: &Self::Program,
        instrs_len: usize,
    ) -> Option<usize> {
        let height = program.shape.as_ref().and_then(|shape| shape.height(self));
        let nb_rows = instrs_len.div_ceil(NUM_EXT_ALU_ENTRIES_PER_ROW);
        Some(next_multiple_of_32(nb_rows, height))
    }

    /// Fills `buffer` with the preprocessed trace: one [`ExtAluAccessCols`] per
    /// `ExtAlu` instruction (addresses, one-hot opcode flags, output multiplicity),
    /// with the rest of the buffer zero-padded.
    fn generate_preprocessed_trace_into(
        &self,
        program: &Self::Program,
        buffer: &mut [MaybeUninit<F>],
    ) {
        // The raw-pointer writes below are only exercised for the concrete SP1Field;
        // reject any other instantiation of `F` up front.
        assert_eq!(
            std::any::TypeId::of::<F>(),
            std::any::TypeId::of::<SP1Field>(),
            "generate_preprocessed_trace only supports SP1Field field"
        );

        let instrs = program
            .inner
            .iter()
            .filter_map(|instruction| match instruction.inner() {
                Instruction::ExtAlu(x) => Some(x),
                _ => None,
            })
            .collect::<Vec<_>>();

        let padded_nb_rows =
            self.preprocessed_num_rows_with_instrs_len(program, instrs.len()).unwrap();

        // SAFETY(review): assumes the caller sized `buffer` to hold at least
        // `padded_nb_rows * NUM_EXT_ALU_PREPROCESSED_COLS` elements, and that viewing
        // `MaybeUninit<F>` as `F` is sound once every element is written — confirm at
        // the call sites.
        let buffer_ptr = buffer.as_mut_ptr() as *mut F;
        let values = unsafe {
            core::slice::from_raw_parts_mut(
                buffer_ptr,
                padded_nb_rows * NUM_EXT_ALU_PREPROCESSED_COLS,
            )
        };

        // Zero the padding region past the last real instruction. `write_bytes` counts
        // in elements of `MaybeUninit<F>`, not bytes; the all-zero byte pattern is
        // presumably a valid (zero) SP1Field encoding — TODO confirm.
        // NOTE(review): `padding_start` is measured in ACCESS cols while the row width
        // is PREPROCESSED cols; these agree only while NUM_EXT_ALU_ENTRIES_PER_ROW == 1.
        unsafe {
            let padding_start = instrs.len() * NUM_EXT_ALU_ACCESS_COLS;
            let padding_size = padded_nb_rows * NUM_EXT_ALU_PREPROCESSED_COLS - padding_start;
            if padding_size > 0 {
                core::ptr::write_bytes(buffer[padding_start..].as_mut_ptr(), 0, padding_size);
            }
        }

        // Generate the trace rows & corresponding records for each chunk of events in parallel.
        let populate_len = instrs.len() * NUM_EXT_ALU_ACCESS_COLS;
        values[..populate_len].par_chunks_mut(NUM_EXT_ALU_ACCESS_COLS).zip_eq(instrs).for_each(
            |(row, instr)| {
                let ExtAluInstr { opcode, mult, addrs } = instr;
                let access: &mut ExtAluAccessCols<_> = row.borrow_mut();
                // Write the row with every opcode flag clear, then set exactly the
                // flag matching this instruction's opcode.
                *access = ExtAluAccessCols {
                    addrs: addrs.to_owned(),
                    is_add: F::from_bool(false),
                    is_sub: F::from_bool(false),
                    is_mul: F::from_bool(false),
                    is_div: F::from_bool(false),
                    mult: mult.to_owned(),
                };
                let target_flag = match opcode {
                    ExtAluOpcode::AddE => &mut access.is_add,
                    ExtAluOpcode::SubE => &mut access.is_sub,
                    ExtAluOpcode::MulE => &mut access.is_mul,
                    ExtAluOpcode::DivE => &mut access.is_div,
                };
                *target_flag = F::from_bool(true);
            },
        );
    }

    fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) {
        // This is a no-op.
    }

    /// Padded main-trace height for the record's `ext_alu_events`, mirroring the
    /// preprocessed-row computation above.
    fn num_rows(&self, input: &Self::Record) -> Option<usize> {
        let height = input.program.shape.as_ref().and_then(|shape| shape.height(self));
        let events = &input.ext_alu_events;
        let nb_rows = events.len().div_ceil(NUM_EXT_ALU_ENTRIES_PER_ROW);
        Some(next_multiple_of_32(nb_rows, height))
    }

    /// Fills `buffer` with the main trace: one [`ExtAluValueCols`] per event,
    /// then zero padding up to the padded height.
    fn generate_trace_into(
        &self,
        input: &ExecutionRecord<F>,
        _: &mut ExecutionRecord<F>,
        buffer: &mut [MaybeUninit<F>],
    ) {
        // Same SP1Field-only restriction as the preprocessed path.
        assert_eq!(
            std::any::TypeId::of::<F>(),
            std::any::TypeId::of::<SP1Field>(),
            "generate_trace_into only supports SP1Field"
        );

        let events = &input.ext_alu_events;
        let padded_nb_rows = self.num_rows(input).unwrap();
        let num_event_rows = events.len();

        // Zero the padding rows. Counts are in elements of `MaybeUninit<F>`, not bytes.
        // SAFETY(review): assumes `buffer` holds at least
        // `padded_nb_rows * NUM_EXT_ALU_COLS` elements — confirm at the call sites.
        unsafe {
            let padding_start = num_event_rows * NUM_EXT_ALU_COLS;
            let padding_size = (padded_nb_rows - num_event_rows) * NUM_EXT_ALU_COLS;
            if padding_size > 0 {
                core::ptr::write_bytes(buffer[padding_start..].as_mut_ptr(), 0, padding_size);
            }
        }

        let buffer_ptr = buffer.as_mut_ptr() as *mut F;
        let values = unsafe {
            core::slice::from_raw_parts_mut(buffer_ptr, num_event_rows * NUM_EXT_ALU_COLS)
        };

        // Generate the trace rows & corresponding records for each chunk of events in parallel.
        let populate_len = events.len() * NUM_EXT_ALU_VALUE_COLS;
        values[..populate_len].par_chunks_mut(NUM_EXT_ALU_VALUE_COLS).zip_eq(events).for_each(
            |(row, &vals)| {
                let cols: &mut ExtAluValueCols<_> = row.borrow_mut();
                *cols = ExtAluValueCols { vals };
            },
        );
    }

    /// This chip is always included in the machine, regardless of record contents.
    fn included(&self, _record: &Self::Record) -> bool {
        true
    }
}
220
impl<AB> Air<AB> for ExtAluChip
where
    AB: SP1RecursionAirBuilder + PairBuilder,
{
    /// Emits the ExtAlu constraints: for each packed entry, the opcode-selected
    /// arithmetic relation over the extension field plus the memory interactions
    /// for the two operands and the result.
    fn eval(&self, builder: &mut AB) {
        let main = builder.main();
        let local = main.row_slice(0);
        let local: &ExtAluCols<AB::Var> = (*local).borrow();
        let prep = builder.preprocessed();
        let prep_local = prep.row_slice(0);
        let prep_local: &ExtAluPreprocessedCols<AB::Var> = (*prep_local).borrow();

        // Pair each value group with its preprocessed access group, entry by entry.
        for (
            ExtAluValueCols { vals },
            ExtAluAccessCols { addrs, is_add, is_sub, is_mul, is_div, mult },
        ) in zip(local.values, prep_local.accesses)
        {
            // Reassemble the Block limbs into extension-field expressions.
            let in1 = vals.in1.as_extension::<AB>();
            let in2 = vals.in2.as_extension::<AB>();
            let out = vals.out.as_extension::<AB>();

            // Check exactly one flag is enabled.
            // (What is constrained here is that the SUM of the preprocessed flags is
            // boolean; `is_real` doubles as the row's realness indicator below.)
            let is_real = is_add + is_sub + is_mul + is_div;
            builder.assert_bool(is_real.clone());

            // Opcode semantics. Subtraction and division are phrased without
            // inverses:  sub: in1 = in2 + out  <=>  out = in1 - in2
            //            div: in1 = in2 * out  <=>  out = in1 / in2
            builder.when(is_add).assert_ext_eq(in1.clone() + in2.clone(), out.clone());
            builder.when(is_sub).assert_ext_eq(in1.clone(), in2.clone() + out.clone());
            builder.when(is_mul).assert_ext_eq(in1.clone() * in2.clone(), out.clone());
            builder.when(is_div).assert_ext_eq(in1, in2 * out);

            // Read the inputs from memory.
            builder.receive_block(addrs.in1, vals.in1, is_real.clone());
            builder.receive_block(addrs.in2, vals.in2, is_real);

            // Write the output to memory.
            builder.send_block(addrs.out, vals.out, mult);
        }
    }
}
260
#[cfg(test)]
mod tests {
    use crate::{chips::test_fixtures, test::test_recursion_linear_program};
    use rand::{rngs::StdRng, Rng, SeedableRng};
    use slop_algebra::{extension::BinomialExtensionField, AbstractExtensionField};

    use sp1_recursion_executor::{instruction as instr, ExtAluOpcode, MemAccessKind};

    use super::*;

    /// The main trace built from the fixture shard should exceed the minimum height.
    #[tokio::test]
    async fn generate_trace() {
        let shard = test_fixtures::shard().await;
        let trace = ExtAluChip.generate_trace(shard, &mut ExecutionRecord::default());
        assert!(trace.height() > test_fixtures::MIN_ROWS);
    }

    /// The preprocessed trace built from the fixture program should exceed the
    /// minimum height.
    #[tokio::test]
    async fn generate_preprocessed_trace() {
        let program = &test_fixtures::program_with_input().await.0;
        let trace = ExtAluChip.generate_preprocessed_trace(program).unwrap();
        assert!(trace.height() > test_fixtures::MIN_ROWS);
    }

    /// End-to-end check of all four opcodes: write random operands, run each ALU
    /// op, and read back the expected result.
    #[tokio::test]
    async fn four_ops() {
        use sp1_primitives::SP1Field;
        type F = SP1Field;
        let mut rng = StdRng::seed_from_u64(0xDEADBEEF);
        // Draw a uniformly random degree-D extension element from four base samples.
        let mut random_extfelt = move || {
            let inner: [F; 4] = core::array::from_fn(|_| rng.sample(rand::distributions::Standard));
            BinomialExtensionField::<F, D>::from_base_slice(&inner)
        };

        let mut instructions = Vec::<Instruction<F>>::new();
        let mut next_addr = 0;
        for _ in 0..1000 {
            // Pick `quot` and `in2` first, then force `in1 = in2 * quot` so the
            // division case has the exact answer `quot`.
            let quot = random_extfelt();
            let in2 = random_extfelt();
            let in1 = in2 * quot;
            // Reserve six consecutive addresses: two operands plus four results.
            let base = next_addr;
            next_addr += 6;
            instructions.extend([
                instr::mem_ext(MemAccessKind::Write, 4, base, in1),
                instr::mem_ext(MemAccessKind::Write, 4, base + 1, in2),
                instr::ext_alu(ExtAluOpcode::AddE, 1, base + 2, base, base + 1),
                instr::mem_ext(MemAccessKind::Read, 1, base + 2, in1 + in2),
                instr::ext_alu(ExtAluOpcode::SubE, 1, base + 3, base, base + 1),
                instr::mem_ext(MemAccessKind::Read, 1, base + 3, in1 - in2),
                instr::ext_alu(ExtAluOpcode::MulE, 1, base + 4, base, base + 1),
                instr::mem_ext(MemAccessKind::Read, 1, base + 4, in1 * in2),
                instr::ext_alu(ExtAluOpcode::DivE, 1, base + 5, base, base + 1),
                instr::mem_ext(MemAccessKind::Read, 1, base + 5, quot),
            ]);
        }

        test_recursion_linear_program(instructions).await;
    }
}