// sp1_recursion_machine/chips/poseidon2_helper/convert.rs
use core::borrow::Borrow;
2use slop_air::{Air, BaseAir, PairBuilder};
3use slop_algebra::{extension::BinomiallyExtendable, Field, PrimeField32};
4use slop_matrix::Matrix;
5use slop_maybe_rayon::prelude::{IndexedParallelIterator, ParallelIterator, ParallelSliceMut};
6use sp1_derive::AlignedBorrow;
7use sp1_hypercube::{air::MachineAir, next_multiple_of_32};
8use sp1_primitives::SP1Field;
9use sp1_recursion_executor::{
10 Address, Block, ExecutionRecord, ExtFeltInstr, Instruction, RecursionProgram, D,
11};
12use std::{borrow::BorrowMut, iter::zip, mem::MaybeUninit};
13
14use crate::builder::SP1RecursionAirBuilder;
15
/// Number of conversion entries packed into a single trace row.
pub const NUM_CONVERT_ENTRIES_PER_ROW: usize = 1;

/// Chip handling the recursion executor's `ExtFelt` conversion instructions
/// (extension-field element <-> base-field element block; see [`Air::eval`] below).
#[derive(Default, Clone)]
pub struct ConvertChip;
20
/// Total number of main-trace columns for [`ConvertChip`].
pub const NUM_CONVERT_COLS: usize = core::mem::size_of::<ConvertCols<u8>>();

/// Main-trace columns: one [`ConvertValueCols`] group per entry in the row.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct ConvertCols<F: Copy> {
    pub values: [ConvertValueCols<F>; NUM_CONVERT_ENTRIES_PER_ROW],
}
/// Number of columns in one [`ConvertValueCols`] group.
const NUM_CONVERT_VALUE_COLS: usize = core::mem::size_of::<ConvertValueCols<u8>>();

/// Value columns for one conversion: the input as a [`Block`] of base-field limbs.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct ConvertValueCols<F: Copy> {
    pub input: Block<F>,
}
35
/// Total number of preprocessed-trace columns for [`ConvertChip`].
pub const NUM_CONVERT_PREPROCESSED_COLS: usize =
    core::mem::size_of::<ConvertPreprocessedCols<u8>>();

/// Preprocessed columns: one [`ConvertAccessCols`] group per entry in the row.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct ConvertPreprocessedCols<F: Copy> {
    pub accesses: [ConvertAccessCols<F>; NUM_CONVERT_ENTRIES_PER_ROW],
}
44
/// Number of columns in one [`ConvertAccessCols`] group.
pub const NUM_CONVERT_ACCESS_COLS: usize = core::mem::size_of::<ConvertAccessCols<u8>>();

/// Memory-access columns for one conversion.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct ConvertAccessCols<F: Copy> {
    // Index 0 addresses the extension-field block; indices 1..=D address the
    // individual base-field limbs (used as `addrs[i + 1]` in `eval`).
    // NOTE(review): 5 is presumably D + 1 — confirm if D ever changes.
    pub addrs: [Address<F>; 5],
    // Interaction multiplicities, paired one-to-one with `addrs`.
    pub mults: [F; 5],
}
53
impl<F: Field> BaseAir<F> for ConvertChip {
    /// The main trace is exactly as wide as [`ConvertCols`].
    fn width(&self) -> usize {
        NUM_CONVERT_COLS
    }
}
59
impl<F: PrimeField32 + BinomiallyExtendable<D>> MachineAir<F> for ConvertChip {
    type Record = ExecutionRecord<F>;

    type Program = RecursionProgram<F>;

    fn name(&self) -> &'static str {
        "ExtFeltConvert"
    }

    fn preprocessed_width(&self) -> usize {
        NUM_CONVERT_PREPROCESSED_COLS
    }

    /// Number of preprocessed rows, derived from the count of `ExtFelt`
    /// instructions in `program` (every other instruction kind is ignored).
    fn preprocessed_num_rows(&self, program: &Self::Program) -> Option<usize> {
        let instrs_len = program
            .inner
            .iter()
            .filter_map(|instruction| match instruction.inner() {
                Instruction::ExtFelt(x) => Some(x),
                _ => None,
            })
            .count();
        self.preprocessed_num_rows_with_instrs_len(program, instrs_len)
    }

    /// Row count for a known instruction count: entries are packed
    /// `NUM_CONVERT_ENTRIES_PER_ROW` per row, then padded up to a multiple of 32
    /// (respecting the program shape's fixed height for this chip, if any).
    fn preprocessed_num_rows_with_instrs_len(
        &self,
        program: &Self::Program,
        instrs_len: usize,
    ) -> Option<usize> {
        let height = program.shape.as_ref().and_then(|shape| shape.height(self));
        let nb_rows = instrs_len.div_ceil(NUM_CONVERT_ENTRIES_PER_ROW);
        Some(next_multiple_of_32(nb_rows, height))
    }

    /// Fill `buffer` with the preprocessed trace: one [`ConvertAccessCols`] per
    /// `ExtFelt` instruction, followed by zero padding.
    fn generate_preprocessed_trace_into(
        &self,
        program: &Self::Program,
        buffer: &mut [MaybeUninit<F>],
    ) {
        // The raw-byte zeroing below is only valid for the concrete SP1Field
        // representation, so reject any other field type up front.
        assert_eq!(
            std::any::TypeId::of::<F>(),
            std::any::TypeId::of::<SP1Field>(),
            "generate_preprocessed_trace only supports SP1Field field"
        );

        // Collect the `ExtFelt` instructions in program order.
        let instrs = program
            .inner
            .iter()
            .filter_map(|instruction| match instruction.inner() {
                Instruction::ExtFelt(x) => Some(x),
                _ => None,
            })
            .collect::<Vec<_>>();

        let padded_nb_rows =
            self.preprocessed_num_rows_with_instrs_len(program, instrs.len()).unwrap();

        // SAFETY: `MaybeUninit<F>` has the same layout as `F`, and the caller is
        // assumed to provide a buffer of at least
        // `padded_nb_rows * NUM_CONVERT_PREPROCESSED_COLS` elements — TODO confirm
        // against the trace-generation driver.
        let buffer_ptr = buffer.as_mut_ptr() as *mut F;
        let values = unsafe {
            core::slice::from_raw_parts_mut(
                buffer_ptr,
                padded_nb_rows * NUM_CONVERT_PREPROCESSED_COLS,
            )
        };

        // Zero the padding region (element counts: `write_bytes` zeroes
        // `padding_size` elements of `MaybeUninit<F>`).
        // SAFETY: all-zero bytes are assumed to be a valid `SP1Field` value
        // (enforced as the only supported field by the assert above).
        unsafe {
            let padding_start = instrs.len() * NUM_CONVERT_ACCESS_COLS;
            let padding_size = padded_nb_rows * NUM_CONVERT_PREPROCESSED_COLS - padding_start;
            if padding_size > 0 {
                core::ptr::write_bytes(buffer[padding_start..].as_mut_ptr(), 0, padding_size);
            }
        }

        // Populate one access-column group per instruction, in parallel.
        let populate_len = instrs.len() * NUM_CONVERT_ACCESS_COLS;
        values[..populate_len].par_chunks_mut(NUM_CONVERT_ACCESS_COLS).zip_eq(instrs).for_each(
            |(row, instr)| {
                let ExtFeltInstr { addrs, mults, ext2felt } = instr;
                let access: &mut ConvertAccessCols<_> = row.borrow_mut();
                access.addrs = addrs.to_owned();
                // The sign convention distinguishes the conversion direction:
                // ext->felt receives the block once and sends the limbs with the
                // executor-recorded multiplicities; felt->ext negates, flipping
                // which side of the interaction each slot sits on.
                if *ext2felt {
                    access.mults[0] = F::one();
                    access.mults[1] = mults[1];
                    access.mults[2] = mults[2];
                    access.mults[3] = mults[3];
                    access.mults[4] = mults[4];
                } else {
                    access.mults[0] = -mults[0];
                    access.mults[1] = -F::one();
                    access.mults[2] = -F::one();
                    access.mults[3] = -F::one();
                    access.mults[4] = -F::one();
                }
            },
        );
    }

    // This chip produces no dependent events.
    fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) {
    }

    /// Number of main-trace rows: one entry per recorded conversion event,
    /// padded to a multiple of 32 (respecting the shape height, if fixed).
    fn num_rows(&self, input: &Self::Record) -> Option<usize> {
        let height = input.program.shape.as_ref().and_then(|shape| shape.height(self));
        let events = &input.ext_felt_conversion_events;
        let nb_rows = events.len().div_ceil(NUM_CONVERT_ENTRIES_PER_ROW);
        Some(next_multiple_of_32(nb_rows, height))
    }

    /// Fill `buffer` with the main trace: one [`ConvertValueCols`] per
    /// conversion event, followed by zero padding.
    fn generate_trace_into(
        &self,
        input: &ExecutionRecord<F>,
        _: &mut ExecutionRecord<F>,
        buffer: &mut [MaybeUninit<F>],
    ) {
        // Same SP1Field-only restriction as the preprocessed path (raw zeroing).
        assert_eq!(
            std::any::TypeId::of::<F>(),
            std::any::TypeId::of::<SP1Field>(),
            "generate_trace_into only supports SP1Field"
        );
        let padded_nb_rows = self.num_rows(input).unwrap();
        let events = &input.ext_felt_conversion_events;
        let num_event_rows = events.len();

        // Zero the padding rows (element counts, as above).
        // SAFETY: all-zero bytes are assumed valid for `SP1Field`; the caller is
        // assumed to provide `padded_nb_rows * NUM_CONVERT_COLS` elements — TODO
        // confirm against the trace-generation driver.
        unsafe {
            let padding_start = num_event_rows * NUM_CONVERT_COLS;
            let padding_size = (padded_nb_rows - num_event_rows) * NUM_CONVERT_COLS;
            if padding_size > 0 {
                core::ptr::write_bytes(buffer[padding_start..].as_mut_ptr(), 0, padding_size);
            }
        }

        // SAFETY: `MaybeUninit<F>` has the same layout as `F`; only the event
        // region (which we fully initialize below) is exposed as `&mut [F]`.
        let buffer_ptr = buffer.as_mut_ptr() as *mut F;
        let values = unsafe {
            core::slice::from_raw_parts_mut(buffer_ptr, num_event_rows * NUM_CONVERT_COLS)
        };

        // Copy each event's input block into its row, in parallel.
        let populate_len = events.len() * NUM_CONVERT_VALUE_COLS;
        values[..populate_len].par_chunks_mut(NUM_CONVERT_VALUE_COLS).zip_eq(events).for_each(
            |(row, &vals)| {
                let cols: &mut ConvertValueCols<_> = row.borrow_mut();
                cols.input = vals.input.to_owned();
            },
        );
    }

    // Always include this chip in the machine, even with zero events.
    fn included(&self, _record: &Self::Record) -> bool {
        true
    }
}
211
212impl<AB> Air<AB> for ConvertChip
213where
214 AB: SP1RecursionAirBuilder + PairBuilder,
215{
216 fn eval(&self, builder: &mut AB) {
217 let main = builder.main();
218 let local = main.row_slice(0);
219 let local: &ConvertCols<AB::Var> = (*local).borrow();
220 let prep = builder.preprocessed();
221 let prep_local = prep.row_slice(0);
222 let prep_local: &ConvertPreprocessedCols<AB::Var> = (*prep_local).borrow();
223
224 for (ConvertValueCols { input }, ConvertAccessCols { addrs, mults }) in
225 zip(local.values, prep_local.accesses)
226 {
227 builder.receive_block(addrs[0], input, mults[0]);
231
232 for i in 0..D {
236 builder.send_single(addrs[i + 1], input[i], mults[i + 1]);
237 }
238 }
239 }
240}
241
#[cfg(test)]
mod tests {
    use slop_matrix::Matrix;
    use sp1_hypercube::air::MachineAir;
    use sp1_recursion_executor::ExecutionRecord;

    use super::ConvertChip;

    use crate::chips::test_fixtures;

    /// Smoke test: the main trace built from the fixture shard is non-trivially sized.
    #[tokio::test]
    async fn generate_trace() {
        let shard = test_fixtures::shard().await;
        let trace = ConvertChip.generate_trace(shard, &mut ExecutionRecord::default());
        assert!(trace.height() > test_fixtures::MIN_ROWS);
    }

    /// Smoke test: the preprocessed trace built from the fixture program is non-trivially sized.
    #[tokio::test]
    async fn generate_preprocessed_trace() {
        let program = &test_fixtures::program_with_input().await.0;
        let trace = ConvertChip.generate_preprocessed_trace(program).unwrap();
        assert!(trace.height() > test_fixtures::MIN_ROWS);
    }
}