1use crate::{builder::SP1RecursionAirBuilder, *};
2use core::borrow::Borrow;
3use p3_air::{Air, AirBuilder, BaseAir, PairBuilder};
4use p3_field::{Field, PrimeField32};
5use p3_matrix::{dense::RowMajorMatrix, Matrix};
6use sp1_core_machine::utils::next_power_of_two;
7use sp1_derive::AlignedBorrow;
8use sp1_stark::air::MachineAir;
9use std::iter::zip;
10#[cfg(feature = "sys")]
11use {
12 p3_baby_bear::BabyBear, p3_field::AbstractField, p3_maybe_rayon::prelude::*,
13 std::borrow::BorrowMut,
14};
15
/// Number of base-field ALU operations packed into a single trace row.
pub const NUM_BASE_ALU_ENTRIES_PER_ROW: usize = 4;

/// Chip handling base-field ALU operations (add/sub/mul/div) in the recursion VM.
#[derive(Default)]
pub struct BaseAluChip;
20
/// Width of the main trace, in columns (computed from the `u8` instantiation of the struct).
pub const NUM_BASE_ALU_COLS: usize = core::mem::size_of::<BaseAluCols<u8>>();

/// Main trace columns: one set of value columns per packed ALU entry.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct BaseAluCols<F: Copy> {
    pub values: [BaseAluValueCols<F>; NUM_BASE_ALU_ENTRIES_PER_ROW],
}
28
/// Width of one packed entry's value columns.
pub const NUM_BASE_ALU_VALUE_COLS: usize = core::mem::size_of::<BaseAluValueCols<u8>>();

/// Value columns for a single ALU entry: the operation's `out`/`in1`/`in2` field elements.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct BaseAluValueCols<F: Copy> {
    // I/O values of the operation (out, in1, in2).
    pub vals: BaseAluIo<F>,
}
36
/// Width of the preprocessed trace, in columns.
pub const NUM_BASE_ALU_PREPROCESSED_COLS: usize =
    core::mem::size_of::<BaseAluPreprocessedCols<u8>>();

/// Preprocessed trace columns: one set of access columns per packed ALU entry.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct BaseAluPreprocessedCols<F: Copy> {
    pub accesses: [BaseAluAccessCols<F>; NUM_BASE_ALU_ENTRIES_PER_ROW],
}
45
/// Width of one packed entry's access columns.
pub const NUM_BASE_ALU_ACCESS_COLS: usize = core::mem::size_of::<BaseAluAccessCols<u8>>();

/// Access (preprocessed) columns for a single ALU entry.
///
/// The four `is_*` flags select the opcode; `eval` constrains their sum to be boolean,
/// so at most one is set (all zero for padding entries).
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct BaseAluAccessCols<F: Copy> {
    // Memory addresses of the operation's out/in1/in2 operands.
    pub addrs: BaseAluIo<Address<F>>,
    pub is_add: F,
    pub is_sub: F,
    pub is_mul: F,
    pub is_div: F,
    // Multiplicity used when sending the output value (see `send_single` in `eval`).
    pub mult: F,
}
58
impl<F: Field> BaseAir<F> for BaseAluChip {
    /// Width of the main trace, in columns.
    fn width(&self) -> usize {
        NUM_BASE_ALU_COLS
    }
}
64
impl<F: PrimeField32> MachineAir<F> for BaseAluChip {
    type Record = ExecutionRecord<F>;

    type Program = crate::RecursionProgram<F>;

    fn name(&self) -> String {
        "BaseAlu".to_string()
    }

    fn preprocessed_width(&self) -> usize {
        NUM_BASE_ALU_PREPROCESSED_COLS
    }

    /// Number of preprocessed rows: instructions are packed
    /// `NUM_BASE_ALU_ENTRIES_PER_ROW` per row, then padded to a power of two
    /// (or to a fixed height if the program pins one for this chip).
    fn preprocessed_num_rows(&self, program: &Self::Program, instrs_len: usize) -> Option<usize> {
        let nb_rows = instrs_len.div_ceil(NUM_BASE_ALU_ENTRIES_PER_ROW);
        let fixed_log2_rows = program.fixed_log2_rows(self);
        Some(match fixed_log2_rows {
            Some(log2_rows) => 1 << log2_rows,
            None => next_power_of_two(nb_rows, None),
        })
    }

    #[cfg(not(feature = "sys"))]
    fn generate_preprocessed_trace(&self, _program: &Self::Program) -> Option<RowMajorMatrix<F>> {
        unimplemented!("To generate traces, enable feature `sp1-recursion-core/sys`");
    }

    /// Builds the preprocessed trace by populating one access-column chunk per
    /// `BaseAlu` instruction via the C row-population routine; remaining rows are
    /// left zeroed as padding.
    #[cfg(feature = "sys")]
    fn generate_preprocessed_trace(&self, program: &Self::Program) -> Option<RowMajorMatrix<F>> {
        // The FFI routine below is only implemented for BabyBear.
        assert_eq!(
            std::any::TypeId::of::<F>(),
            std::any::TypeId::of::<BabyBear>(),
            "generate_preprocessed_trace only supports BabyBear field"
        );

        // SAFETY: `F` is BabyBear (checked above), so the element types are identical
        // and the reference transmute only renames the type.
        let instrs = unsafe {
            std::mem::transmute::<Vec<&BaseAluInstr<F>>, Vec<&BaseAluInstr<BabyBear>>>(
                program
                    .inner
                    .iter()
                    .filter_map(|instruction| match instruction {
                        Instruction::BaseAlu(x) => Some(x),
                        _ => None,
                    })
                    .collect::<Vec<_>>(),
            )
        };
        let padded_nb_rows = self.preprocessed_num_rows(program, instrs.len()).unwrap();
        let mut values = vec![BabyBear::zero(); padded_nb_rows * NUM_BASE_ALU_PREPROCESSED_COLS];

        // Populate only the prefix holding real instructions; the zeroed tail is padding.
        let populate_len = instrs.len() * NUM_BASE_ALU_ACCESS_COLS;
        values[..populate_len].par_chunks_mut(NUM_BASE_ALU_ACCESS_COLS).zip_eq(instrs).for_each(
            |(row, instr)| {
                let access: &mut BaseAluAccessCols<_> = row.borrow_mut();
                // SAFETY: `row` is exactly NUM_BASE_ALU_ACCESS_COLS elements, which
                // `borrow_mut` (AlignedBorrow) reinterprets as the access struct.
                unsafe {
                    crate::sys::alu_base_instr_to_row_babybear(instr, access);
                }
            },
        );

        // SAFETY: `F` is BabyBear (checked above), so this transmute only renames the
        // element type of the vector.
        Some(RowMajorMatrix::new(
            unsafe { std::mem::transmute::<Vec<BabyBear>, Vec<F>>(values) },
            NUM_BASE_ALU_PREPROCESSED_COLS,
        ))
    }

    // This chip has no dependencies to generate.
    fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) {
    }

    /// Number of main-trace rows: events packed `NUM_BASE_ALU_ENTRIES_PER_ROW` per
    /// row, padded like `preprocessed_num_rows`.
    fn num_rows(&self, input: &Self::Record) -> Option<usize> {
        let nb_rows = input.base_alu_events.len().div_ceil(NUM_BASE_ALU_ENTRIES_PER_ROW);
        let fixed_log2_rows = input.fixed_log2_rows(self);
        Some(match fixed_log2_rows {
            Some(log2_rows) => 1 << log2_rows,
            None => next_power_of_two(nb_rows, None),
        })
    }

    #[cfg(not(feature = "sys"))]
    fn generate_trace(&self, _input: &Self::Record, _: &mut Self::Record) -> RowMajorMatrix<F> {
        unimplemented!("To generate traces, enable feature `sp1-recursion-core/sys`");
    }

    /// Builds the main trace by populating one value-column chunk per ALU event via
    /// the C row-population routine; remaining rows are left zeroed as padding.
    #[cfg(feature = "sys")]
    fn generate_trace(&self, input: &Self::Record, _: &mut Self::Record) -> RowMajorMatrix<F> {
        // The FFI routine below is only implemented for BabyBear.
        assert_eq!(
            std::any::TypeId::of::<F>(),
            std::any::TypeId::of::<BabyBear>(),
            "generate_trace only supports BabyBear field"
        );

        // SAFETY: `F` is BabyBear (checked above), so this is a reference cast between
        // identical types.
        let events = unsafe {
            std::mem::transmute::<&Vec<BaseAluIo<F>>, &Vec<BaseAluIo<BabyBear>>>(
                &input.base_alu_events,
            )
        };
        let padded_nb_rows = self.num_rows(input).unwrap();
        let mut values = vec![BabyBear::zero(); padded_nb_rows * NUM_BASE_ALU_COLS];

        // Populate only the prefix holding real events; the zeroed tail is padding.
        let populate_len = events.len() * NUM_BASE_ALU_VALUE_COLS;
        values[..populate_len].par_chunks_mut(NUM_BASE_ALU_VALUE_COLS).zip_eq(events).for_each(
            |(row, &vals)| {
                let cols: &mut BaseAluValueCols<_> = row.borrow_mut();
                // SAFETY: `row` is exactly NUM_BASE_ALU_VALUE_COLS elements, which
                // `borrow_mut` (AlignedBorrow) reinterprets as the value struct.
                unsafe {
                    crate::sys::alu_base_event_to_row_babybear(&vals, cols);
                }
            },
        );

        // SAFETY: `F` is BabyBear (checked above), so this transmute only renames the
        // element type of the vector.
        RowMajorMatrix::new(
            unsafe { std::mem::transmute::<Vec<BabyBear>, Vec<F>>(values) },
            NUM_BASE_ALU_COLS,
        )
    }

    fn included(&self, _record: &Self::Record) -> bool {
        true
    }

    fn local_only(&self) -> bool {
        true
    }
}
193
194impl<AB> Air<AB> for BaseAluChip
195where
196 AB: SP1RecursionAirBuilder + PairBuilder,
197{
198 fn eval(&self, builder: &mut AB) {
199 let main = builder.main();
200 let local = main.row_slice(0);
201 let local: &BaseAluCols<AB::Var> = (*local).borrow();
202 let prep = builder.preprocessed();
203 let prep_local = prep.row_slice(0);
204 let prep_local: &BaseAluPreprocessedCols<AB::Var> = (*prep_local).borrow();
205
206 for (
207 BaseAluValueCols { vals: BaseAluIo { out, in1, in2 } },
208 BaseAluAccessCols { addrs, is_add, is_sub, is_mul, is_div, mult },
209 ) in zip(local.values, prep_local.accesses)
210 {
211 let is_real = is_add + is_sub + is_mul + is_div;
213 builder.assert_bool(is_real.clone());
214
215 builder.when(is_add).assert_eq(in1 + in2, out);
216 builder.when(is_sub).assert_eq(in1, in2 + out);
217 builder.when(is_mul).assert_eq(out, in1 * in2);
218 builder.when(is_div).assert_eq(in2 * out, in1);
219
220 builder.receive_single(addrs.in1, in1, is_real.clone());
221
222 builder.receive_single(addrs.in2, in2, is_real);
223
224 builder.send_single(addrs.out, out, mult);
225 }
226 }
227}
228
#[cfg(all(test, feature = "sys"))]
mod tests {
    use crate::{chips::test_fixtures, runtime::instruction as instr};
    use machine::tests::test_recursion_linear_program;
    use p3_baby_bear::BabyBear;
    use p3_field::AbstractField;
    use p3_matrix::dense::RowMajorMatrix;
    use rand::{rngs::StdRng, Rng, SeedableRng};
    use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig};

    use super::*;

    /// Pure-Rust mirror of the FFI-backed `generate_trace`, used as an oracle.
    fn generate_trace_reference(
        input: &ExecutionRecord<BabyBear>,
        _: &mut ExecutionRecord<BabyBear>,
    ) -> RowMajorMatrix<BabyBear> {
        let events = &input.base_alu_events;
        let padded_nb_rows = BaseAluChip.num_rows(input).unwrap();
        let mut values = vec![BabyBear::zero(); padded_nb_rows * NUM_BASE_ALU_COLS];

        let populate_len = events.len() * NUM_BASE_ALU_VALUE_COLS;
        values[..populate_len].par_chunks_mut(NUM_BASE_ALU_VALUE_COLS).zip_eq(events).for_each(
            |(row, &vals)| {
                let cols: &mut BaseAluValueCols<_> = row.borrow_mut();
                *cols = BaseAluValueCols { vals };
            },
        );

        RowMajorMatrix::new(values, NUM_BASE_ALU_COLS)
    }

    // Checks the FFI trace generation against the pure-Rust reference.
    #[test]
    fn generate_trace() {
        let shard = test_fixtures::shard();
        let mut execution_record = test_fixtures::default_execution_record();
        let trace = BaseAluChip.generate_trace(&shard, &mut execution_record);
        assert!(trace.height() >= test_fixtures::MIN_TEST_CASES);

        assert_eq!(trace, generate_trace_reference(&shard, &mut execution_record));
    }

    /// Pure-Rust mirror of the FFI-backed `generate_preprocessed_trace`.
    fn generate_preprocessed_trace_reference(
        program: &RecursionProgram<BabyBear>,
    ) -> RowMajorMatrix<BabyBear> {
        type F = BabyBear;

        let instrs = program
            .inner
            .iter()
            .filter_map(|instruction| match instruction {
                Instruction::BaseAlu(x) => Some(x),
                _ => None,
            })
            .collect::<Vec<_>>();
        let padded_nb_rows = BaseAluChip.preprocessed_num_rows(program, instrs.len()).unwrap();
        let mut values = vec![F::zero(); padded_nb_rows * NUM_BASE_ALU_PREPROCESSED_COLS];

        let populate_len = instrs.len() * NUM_BASE_ALU_ACCESS_COLS;
        values[..populate_len].par_chunks_mut(NUM_BASE_ALU_ACCESS_COLS).zip_eq(instrs).for_each(
            |(row, instr)| {
                let BaseAluInstr { opcode, mult, addrs } = instr;
                let access: &mut BaseAluAccessCols<_> = row.borrow_mut();
                // Write all flags as false, then set exactly the one matching the opcode.
                *access = BaseAluAccessCols {
                    addrs: addrs.to_owned(),
                    is_add: F::from_bool(false),
                    is_sub: F::from_bool(false),
                    is_mul: F::from_bool(false),
                    is_div: F::from_bool(false),
                    mult: mult.to_owned(),
                };
                let target_flag = match opcode {
                    BaseAluOpcode::AddF => &mut access.is_add,
                    BaseAluOpcode::SubF => &mut access.is_sub,
                    BaseAluOpcode::MulF => &mut access.is_mul,
                    BaseAluOpcode::DivF => &mut access.is_div,
                };
                *target_flag = F::from_bool(true);
            },
        );

        RowMajorMatrix::new(values, NUM_BASE_ALU_PREPROCESSED_COLS)
    }

    #[test]
    #[ignore = "Failing due to merge conflicts. Will be fixed shortly."]
    fn generate_preprocessed_trace() {
        let program = test_fixtures::program();
        let trace = BaseAluChip.generate_preprocessed_trace(&program).unwrap();
        assert!(trace.height() >= test_fixtures::MIN_TEST_CASES);

        assert_eq!(trace, generate_preprocessed_trace_reference(&program));
    }

    // End-to-end: write random operands, run all four ops, and read back the
    // expected results through the recursion machine.
    #[test]
    pub fn four_ops() {
        type SC = BabyBearPoseidon2;
        type F = <SC as StarkGenericConfig>::Val;

        let mut rng = StdRng::seed_from_u64(0xDEADBEEF);
        let mut random_felt = move || -> F { rng.sample(rand::distributions::Standard) };
        let mut addr = 0;

        let instructions = (0..1000)
            .flat_map(|_| {
                // Construct in1 = in2 * quot so the division result is exactly `quot`.
                let quot = random_felt();
                let in2 = random_felt();
                let in1 = in2 * quot;
                let alloc_size = 6;
                let a = (0..alloc_size).map(|x| x + addr).collect::<Vec<_>>();
                addr += alloc_size;
                [
                    instr::mem_single(MemAccessKind::Write, 4, a[0], in1),
                    instr::mem_single(MemAccessKind::Write, 4, a[1], in2),
                    instr::base_alu(BaseAluOpcode::AddF, 1, a[2], a[0], a[1]),
                    instr::mem_single(MemAccessKind::Read, 1, a[2], in1 + in2),
                    instr::base_alu(BaseAluOpcode::SubF, 1, a[3], a[0], a[1]),
                    instr::mem_single(MemAccessKind::Read, 1, a[3], in1 - in2),
                    instr::base_alu(BaseAluOpcode::MulF, 1, a[4], a[0], a[1]),
                    instr::mem_single(MemAccessKind::Read, 1, a[4], in1 * in2),
                    instr::base_alu(BaseAluOpcode::DivF, 1, a[5], a[0], a[1]),
                    instr::mem_single(MemAccessKind::Read, 1, a[5], quot),
                ]
            })
            .collect::<Vec<Instruction<F>>>();

        test_recursion_linear_program(instructions);
    }
}