//! Constant-memory chip (`sp1_recursion_machine/chips/mem/constant.rs`).
//!
//! Exposes program-defined constant memory values to the memory argument via a
//! preprocessed trace; the main trace is a single dummy column.
1use core::borrow::Borrow;
2use itertools::Itertools;
3use slop_air::{Air, BaseAir, PairBuilder};
4use slop_algebra::PrimeField32;
5use slop_matrix::Matrix;
6use sp1_derive::AlignedBorrow;
7use sp1_hypercube::{air::MachineAir, next_multiple_of_32};
8use sp1_recursion_executor::{
9    Block, ExecutionRecord, Instruction, MemAccessKind, MemInstr, RecursionProgram,
10};
11use std::{borrow::BorrowMut, iter::zip, marker::PhantomData, mem::MaybeUninit};
12
13use crate::builder::SP1RecursionAirBuilder;
14
15use super::MemoryAccessCols;
16
/// Number of constant-memory entries packed into each trace row.
pub const NUM_CONST_MEM_ENTRIES_PER_ROW: usize = 1;
18
/// Chip that exposes constant (program-defined) memory values to the memory
/// argument. All of its data lives in the preprocessed trace; the main trace
/// is a dummy (see [`MemoryConstCols`]).
#[derive(Default, Clone)]
pub struct MemoryConstChip<F> {
    // Zero-sized: `F` only ties the chip to the machine's field type.
    _marker: PhantomData<F>,
}
23
/// Width of the main trace in field elements (size is measured with `u8` cells,
/// so this counts columns, not bytes of `F`).
pub const NUM_MEM_INIT_COLS: usize = core::mem::size_of::<MemoryConstCols<u8>>();
25
/// Main-trace columns. The chip's real data is preprocessed, so this holds a
/// single unused column.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct MemoryConstCols<F: Copy> {
    // At least one column is required, otherwise a bunch of things break.
    _nothing: F,
}
32
/// Width of the preprocessed trace in field elements (size measured with `u8`
/// cells, so this counts columns).
pub const NUM_MEM_PREPROCESSED_INIT_COLS: usize =
    core::mem::size_of::<MemoryConstPreprocessedCols<u8>>();
35
/// Preprocessed columns: for each entry in the row, the constant value and the
/// memory access (address plus signed multiplicity) that publishes it to the
/// memory argument.
#[derive(AlignedBorrow, Debug, Clone, Copy)]
#[repr(C)]
pub struct MemoryConstPreprocessedCols<F: Copy> {
    values_and_accesses: [(Block<F>, MemoryAccessCols<F>); NUM_CONST_MEM_ENTRIES_PER_ROW],
}
impl<F: Send + Sync> BaseAir<F> for MemoryConstChip<F> {
    /// Width of the main (non-preprocessed) trace.
    fn width(&self) -> usize {
        NUM_MEM_INIT_COLS
    }
}
46
47impl<F: PrimeField32> MachineAir<F> for MemoryConstChip<F> {
48    type Record = ExecutionRecord<F>;
49
50    type Program = RecursionProgram<F>;
51
52    fn name(&self) -> &'static str {
53        "MemoryConst"
54    }
55    fn preprocessed_width(&self) -> usize {
56        NUM_MEM_PREPROCESSED_INIT_COLS
57    }
58
59    fn preprocessed_num_rows(&self, program: &Self::Program) -> Option<usize> {
60        let instrs_len = program
61            .inner
62            .iter()
63            .filter_map(|instruction| match instruction.inner() {
64                Instruction::Mem(MemInstr { addrs, vals, mult, kind }) => {
65                    let mult = mult.to_owned();
66                    let mult = match kind {
67                        MemAccessKind::Read => -mult,
68                        MemAccessKind::Write => mult,
69                    };
70
71                    Some((vals.inner, MemoryAccessCols { addr: addrs.inner, mult }))
72                }
73                _ => None,
74            })
75            .chunks(NUM_CONST_MEM_ENTRIES_PER_ROW)
76            .into_iter()
77            .count();
78        self.preprocessed_num_rows_with_instrs_len(program, instrs_len)
79    }
80
81    fn preprocessed_num_rows_with_instrs_len(
82        &self,
83        program: &Self::Program,
84        instrs_len: usize,
85    ) -> Option<usize> {
86        let height = program.shape.as_ref().and_then(|shape| shape.height(self));
87        Some(next_multiple_of_32(instrs_len, height))
88    }
89
90    fn generate_preprocessed_trace_into(
91        &self,
92        program: &Self::Program,
93        buffer: &mut [MaybeUninit<F>],
94    ) {
95        let chunks = program
96            .inner
97            .iter()
98            .filter_map(|instruction| match instruction.inner() {
99                Instruction::Mem(MemInstr { addrs, vals, mult, kind }) => {
100                    let mult = mult.to_owned();
101                    let mult = match kind {
102                        MemAccessKind::Read => -mult,
103                        MemAccessKind::Write => mult,
104                    };
105
106                    Some((vals.inner, MemoryAccessCols { addr: addrs.inner, mult }))
107                }
108                _ => None,
109            })
110            .chunks(NUM_CONST_MEM_ENTRIES_PER_ROW);
111
112        let buffer_ptr = buffer.as_mut_ptr() as *mut F;
113
114        let mut nb_rows = 0;
115        for row_vs_as in &chunks {
116            let start = nb_rows * NUM_MEM_PREPROCESSED_INIT_COLS;
117            let values = unsafe {
118                core::slice::from_raw_parts_mut(
119                    buffer_ptr.add(start),
120                    NUM_MEM_PREPROCESSED_INIT_COLS,
121                )
122            };
123            let cols: &mut MemoryConstPreprocessedCols<_> = values.borrow_mut();
124            for (cell, access) in zip(&mut cols.values_and_accesses, row_vs_as) {
125                *cell = access;
126            }
127            nb_rows += 1;
128        }
129
130        let padded_nb_rows = self.preprocessed_num_rows_with_instrs_len(program, nb_rows).unwrap();
131
132        // NOTE: this is safe since there are always a single event per row.
133        unsafe {
134            let padding_start = nb_rows * NUM_MEM_PREPROCESSED_INIT_COLS;
135            let padding_size = padded_nb_rows * NUM_MEM_PREPROCESSED_INIT_COLS - padding_start;
136            if padding_size > 0 {
137                core::ptr::write_bytes(buffer[padding_start..].as_mut_ptr(), 0, padding_size);
138            }
139        }
140    }
141
142    fn generate_dependencies(&self, _: &Self::Record, _: &mut Self::Record) {
143        // This is a no-op.
144    }
145
146    fn num_rows(&self, input: &Self::Record) -> Option<usize> {
147        let height = input.program.shape.as_ref().and_then(|shape| shape.height(self));
148        let num_rows = input.mem_const_count.div_ceil(NUM_CONST_MEM_ENTRIES_PER_ROW);
149        let padded_nb_rows = next_multiple_of_32(num_rows, height);
150        Some(padded_nb_rows)
151    }
152
153    fn generate_trace_into(
154        &self,
155        input: &ExecutionRecord<F>,
156        _: &mut ExecutionRecord<F>,
157        buffer: &mut [MaybeUninit<F>],
158    ) {
159        let padded_nb_rows = self.num_rows(input).unwrap();
160        unsafe {
161            core::ptr::write_bytes(buffer.as_mut_ptr(), 0, padded_nb_rows);
162        }
163    }
164
165    fn included(&self, _record: &Self::Record) -> bool {
166        true
167    }
168}
169
170impl<AB> Air<AB> for MemoryConstChip<AB::F>
171where
172    AB: SP1RecursionAirBuilder + PairBuilder,
173{
174    fn eval(&self, builder: &mut AB) {
175        let prep = builder.preprocessed();
176        let prep_local = prep.row_slice(0);
177        let prep_local: &MemoryConstPreprocessedCols<AB::Var> = (*prep_local).borrow();
178
179        for (value, access) in prep_local.values_and_accesses {
180            builder.send_block(access.addr, value, access.mult);
181        }
182    }
183}
184
#[cfg(test)]
mod tests {
    use slop_matrix::Matrix;
    use sp1_hypercube::air::MachineAir;
    use sp1_recursion_executor::{instruction as instr, ExecutionRecord, MemAccessKind};

    use super::MemoryConstChip;

    use crate::{chips::test_fixtures, test::test_recursion_linear_program};

    // Smoke test: the main trace generates and is padded beyond the fixture minimum.
    #[tokio::test]
    async fn generate_trace() {
        let shard = test_fixtures::shard().await;
        let chip = MemoryConstChip::default();
        let trace = chip.generate_trace(shard, &mut ExecutionRecord::default());
        assert!(trace.height() > test_fixtures::MIN_ROWS);
    }

    // Smoke test: the preprocessed trace generates from a real program fixture.
    #[tokio::test]
    async fn generate_preprocessed_trace() {
        let program = &test_fixtures::program_with_input().await.0;
        let chip = MemoryConstChip::default();
        let trace = chip.generate_preprocessed_trace(program).unwrap();
        assert!(trace.height() > test_fixtures::MIN_ROWS);
    }

    // A matching write/read pair proves end-to-end.
    // NOTE(review): the three numeric args appear to be (mult, addr, val) — the
    // bad-* tests below each perturb one of them in order; confirm against
    // `sp1_recursion_executor::instruction::mem`.
    #[tokio::test]
    pub async fn prove_basic_mem() {
        test_recursion_linear_program(vec![
            instr::mem(MemAccessKind::Write, 1, 1, 2),
            instr::mem(MemAccessKind::Read, 1, 1, 2),
        ])
        .await;
    }

    // Mismatched first argument on the read must fail the memory argument.
    #[tokio::test]
    #[should_panic]
    pub async fn basic_mem_bad_mult() {
        test_recursion_linear_program(vec![
            instr::mem(MemAccessKind::Write, 1, 1, 2),
            instr::mem(MemAccessKind::Read, 9, 1, 2),
        ])
        .await;
    }

    // Reading a different address than was written must fail.
    #[tokio::test]
    #[should_panic]
    pub async fn basic_mem_bad_address() {
        test_recursion_linear_program(vec![
            instr::mem(MemAccessKind::Write, 1, 1, 2),
            instr::mem(MemAccessKind::Read, 1, 9, 2),
        ])
        .await;
    }

    // Reading back a different value than was written must fail.
    #[tokio::test]
    #[should_panic]
    pub async fn basic_mem_bad_value() {
        test_recursion_linear_program(vec![
            instr::mem(MemAccessKind::Write, 1, 1, 2),
            instr::mem(MemAccessKind::Read, 1, 1, 999),
        ])
        .await;
    }
}