use alloc::vec::Vec;
use core::{
borrow::{Borrow, BorrowMut},
ops::Range,
};
use miden_core::{
Felt, ONE, WORD_SIZE, Word, ZERO,
field::PrimeCharacteristicRing,
utils::{Matrix, RowMajorMatrix, range},
};
use super::{
CHIPLETS_OFFSET, CHIPLETS_WIDTH, CLK_COL_IDX, CTX_COL_IDX, DECODER_TRACE_OFFSET,
DECODER_TRACE_WIDTH, FN_HASH_OFFSET, RANGE_CHECK_TRACE_OFFSET, RANGE_CHECK_TRACE_WIDTH,
RowIndex, STACK_TRACE_OFFSET, STACK_TRACE_WIDTH, TRACE_WIDTH,
chiplets::{
BITWISE_A_COL_IDX, BITWISE_B_COL_IDX, BITWISE_OUTPUT_COL_IDX, HASHER_DIRECTION_BIT_COL_IDX,
HASHER_IS_BOUNDARY_COL_IDX, HASHER_MRUPDATE_ID_COL_IDX, HASHER_NODE_INDEX_COL_IDX,
HASHER_S_PERM_COL_IDX, HASHER_STATE_COL_RANGE, MEMORY_CLK_COL_IDX, MEMORY_CTX_COL_IDX,
MEMORY_IDX0_COL_IDX, MEMORY_IDX1_COL_IDX, MEMORY_V_COL_RANGE, MEMORY_WORD_ADDR_HI_COL_IDX,
MEMORY_WORD_ADDR_LO_COL_IDX, MEMORY_WORD_COL_IDX, NUM_ACE_SELECTORS,
ace::{
CLK_IDX, CTX_IDX, EVAL_OP_IDX, ID_0_IDX, ID_1_IDX, ID_2_IDX, M_0_IDX, M_1_IDX, PTR_IDX,
READ_NUM_EVAL_IDX, SELECTOR_BLOCK_IDX, SELECTOR_START_IDX, V_0_0_IDX, V_0_1_IDX,
V_1_0_IDX, V_1_1_IDX, V_2_0_IDX, V_2_1_IDX,
},
hasher::{DIGEST_LEN, STATE_WIDTH},
},
decoder::{
GROUP_COUNT_COL_IDX, HASHER_STATE_OFFSET, IN_SPAN_COL_IDX, IS_CALL_FLAG_COL_IDX,
IS_LOOP_BODY_FLAG_COL_IDX, IS_LOOP_FLAG_COL_IDX, IS_SYSCALL_FLAG_COL_IDX,
NUM_HASHER_COLUMNS, NUM_OP_BATCH_FLAGS, OP_BATCH_FLAGS_OFFSET, OP_BITS_EXTRA_COLS_OFFSET,
USER_OP_HELPERS_OFFSET,
},
stack::{B0_COL_IDX, B1_COL_IDX, H0_COL_IDX},
};
/// Absolute trace-column range occupied by the decoder's hasher-state columns
/// (`NUM_HASHER_COLUMNS` columns starting at the decoder trace's hasher-state offset).
const DECODER_HASHER_RANGE: Range<usize> =
    range(DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET, NUM_HASHER_COLUMNS);
/// A typed view of one row of the main trace, laid out section by section.
///
/// `#[repr(C)]` is essential: the `Borrow`/`BorrowMut` impls below reinterpret a flat
/// `[T]` slice of length `TRACE_WIDTH` as this struct via `align_to`, which is only
/// sound because every field is a `T` (or array of `T`) and C layout guarantees the
/// fields appear in declaration order with no reordering.
#[derive(Debug)]
#[repr(C)]
pub struct MainTraceRow<T> {
    /// Clock cycle column.
    pub clk: T,
    /// Execution context column.
    pub ctx: T,
    /// Hash of the function currently executing (one word).
    pub fn_hash: [T; WORD_SIZE],
    /// Decoder trace columns.
    pub decoder: [T; DECODER_TRACE_WIDTH],
    /// Stack trace columns.
    pub stack: [T; STACK_TRACE_WIDTH],
    /// Range-checker trace columns.
    pub range: [T; RANGE_CHECK_TRACE_WIDTH],
    /// Chiplets trace columns.
    pub chiplets: [T; CHIPLETS_WIDTH],
}
impl<T> Borrow<MainTraceRow<T>> for [T] {
    /// Reinterprets a `TRACE_WIDTH`-long slice of `T` as a [`MainTraceRow<T>`].
    ///
    /// The slice length is only debug-asserted; callers must pass exactly
    /// `TRACE_WIDTH` elements.
    fn borrow(&self) -> &MainTraceRow<T> {
        debug_assert_eq!(self.len(), TRACE_WIDTH);
        // SAFETY: `MainTraceRow<T>` is `#[repr(C)]` and composed exclusively of `T`
        // fields/arrays, so its alignment equals that of `T` and its size is
        // `TRACE_WIDTH * size_of::<T>()` (debug-checked above). Hence `align_to`
        // yields empty prefix/suffix and exactly one well-formed value.
        let (prefix, shorts, suffix) = unsafe { self.align_to::<MainTraceRow<T>>() };
        debug_assert!(prefix.is_empty(), "Alignment should match");
        debug_assert!(suffix.is_empty(), "Alignment should match");
        debug_assert_eq!(shorts.len(), 1);
        &shorts[0]
    }
}
impl<T> BorrowMut<MainTraceRow<T>> for [T] {
    /// Mutable counterpart of `Borrow::borrow`: reinterprets a `TRACE_WIDTH`-long
    /// slice of `T` as a mutable [`MainTraceRow<T>`].
    fn borrow_mut(&mut self) -> &mut MainTraceRow<T> {
        debug_assert_eq!(self.len(), TRACE_WIDTH);
        // SAFETY: same layout argument as in the `Borrow` impl above — `#[repr(C)]`
        // struct of only-`T` fields, so alignment and total size match the slice.
        let (prefix, shorts, suffix) = unsafe { self.align_to_mut::<MainTraceRow<T>>() };
        debug_assert!(prefix.is_empty(), "Alignment should match");
        debug_assert!(suffix.is_empty(), "Alignment should match");
        debug_assert_eq!(shorts.len(), 1);
        &mut shorts[0]
    }
}
/// Physical storage backing a [`MainTrace`]; all variants expose the same logical
/// `num_rows x TRACE_WIDTH` matrix through [`MainTrace::get`].
#[derive(Debug)]
enum TraceStorage {
    /// Trace kept in three pieces: the "core" columns (system + decoder + stack) in
    /// row-major order, the chiplets columns in row-major order, and the two
    /// range-checker columns stored column-wise.
    Parts {
        /// Row-major buffer of the first `CORE_WIDTH` columns; `num_rows * CORE_WIDTH` cells.
        core_rm: Vec<Felt>,
        /// Row-major buffer of the chiplets columns; `num_rows * CHIPLETS_WIDTH` cells.
        chiplets_rm: Vec<Felt>,
        /// The two range-checker columns, each `num_rows` long.
        range_checker_cols: [Vec<Felt>; 2],
        /// Logical number of rows in the trace.
        num_rows: usize,
    },
    /// A single row-major matrix: one matrix row per trace row.
    RowMajor(RowMajorMatrix<Felt>),
    /// A transposed matrix: each matrix "row" is one trace *column*.
    Transposed {
        /// Matrix of shape `num_cols x num_rows` (transposed relative to the trace).
        matrix: RowMajorMatrix<Felt>,
        /// Logical number of trace columns (the matrix's height).
        num_cols: usize,
        /// Logical number of trace rows (the matrix's width).
        num_rows: usize,
    },
}
/// The main execution trace of the VM together with the index of the last row that
/// belongs to the program itself (rows past it are presumably padding — TODO confirm
/// against the trace builder).
#[derive(Debug)]
pub struct MainTrace {
    /// Backing storage; the layout is abstracted away by the accessor methods.
    storage: TraceStorage,
    /// Index of the last program row.
    last_program_row: RowIndex,
}
const CORE_WIDTH: usize = RANGE_CHECK_TRACE_OFFSET;
/// Number of rows filled per parallel work unit when materializing a row-major copy.
#[cfg(feature = "concurrent")]
const ROW_MAJOR_CHUNK_SIZE: usize = 512;
impl MainTrace {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------

    /// Creates a trace backed by an already row-major matrix.
    pub fn new(matrix: RowMajorMatrix<Felt>, last_program_row: RowIndex) -> Self {
        Self {
            storage: TraceStorage::RowMajor(matrix),
            last_program_row,
        }
    }

    /// Creates a trace from its three separately-built pieces (core columns, chiplets
    /// columns, and the two range-checker columns).
    ///
    /// # Panics
    /// Panics if any buffer's length is inconsistent with `num_rows`.
    pub fn from_parts(
        core_rm: Vec<Felt>,
        chiplets_rm: Vec<Felt>,
        range_checker_cols: [Vec<Felt>; 2],
        num_rows: usize,
        last_program_row: RowIndex,
    ) -> Self {
        assert_eq!(core_rm.len(), num_rows * CORE_WIDTH);
        assert_eq!(chiplets_rm.len(), num_rows * CHIPLETS_WIDTH);
        assert_eq!(range_checker_cols[0].len(), num_rows);
        assert_eq!(range_checker_cols[1].len(), num_rows);
        Self {
            storage: TraceStorage::Parts {
                core_rm,
                chiplets_rm,
                range_checker_cols,
                num_rows,
            },
            last_program_row,
        }
    }

    /// Creates a trace from a transposed matrix, where each matrix row holds one
    /// trace *column* (hence cols = height, rows = width).
    pub fn from_transposed(transposed: RowMajorMatrix<Felt>, last_program_row: RowIndex) -> Self {
        let num_cols = transposed.height();
        let num_rows = transposed.width();
        Self {
            storage: TraceStorage::Transposed { matrix: transposed, num_cols, num_rows },
            last_program_row,
        }
    }

    // CELL / SHAPE ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns the trace cell at (`row`, `col`), dispatching on the storage layout.
    ///
    /// # Panics
    /// Panics if `row` or `col` is out of bounds (for `Parts` via explicit asserts;
    /// for the matrix-backed layouts via `expect`/`assert` on the slice lookups).
    #[inline]
    pub fn get(&self, row: RowIndex, col: usize) -> Felt {
        let r = row.as_usize();
        match &self.storage {
            TraceStorage::Parts {
                core_rm,
                chiplets_rm,
                range_checker_cols,
                num_rows,
            } => {
                assert!(r < *num_rows, "main trace row index in bounds");
                assert!(col < TRACE_WIDTH, "main trace column index in bounds");
                if col < CORE_WIDTH {
                    // Core section: row-major indexing.
                    core_rm[r * CORE_WIDTH + col]
                } else {
                    // Normalize the column to be relative to the range-checker section.
                    let nc = col - CORE_WIDTH;
                    if nc < RANGE_CHECK_TRACE_WIDTH {
                        // Range-checker columns are stored column-wise.
                        range_checker_cols[nc][r]
                    } else {
                        // Remaining columns belong to the chiplets section.
                        chiplets_rm[r * CHIPLETS_WIDTH + (nc - RANGE_CHECK_TRACE_WIDTH)]
                    }
                }
            },
            TraceStorage::RowMajor(matrix) => {
                let row_slice = matrix.row_slice(r).expect("main trace row index in bounds");
                assert!(col < row_slice.len(), "main trace column index in bounds");
                row_slice[col]
            },
            TraceStorage::Transposed { matrix, num_cols, .. } => {
                // In the transposed layout, matrix row `col` is the trace column.
                let col_slice = matrix.row_slice(col).expect("main trace column index in bounds");
                assert!(r < col_slice.len(), "main trace row index in bounds");
                debug_assert_eq!(col_slice.len(), matrix.width());
                debug_assert_eq!(matrix.height(), *num_cols);
                col_slice[r]
            },
        }
    }

    /// Returns the number of columns in the trace.
    #[inline]
    pub fn width(&self) -> usize {
        match &self.storage {
            TraceStorage::Parts { .. } => TRACE_WIDTH,
            TraceStorage::RowMajor(matrix) => matrix.width(),
            TraceStorage::Transposed { num_cols, .. } => *num_cols,
        }
    }

    /// Materializes the trace as a fresh row-major matrix, regardless of the
    /// current storage layout. For `Parts`, rows are assembled as
    /// `[core | range-checker(2) | chiplets]`.
    pub fn to_row_major(&self) -> RowMajorMatrix<Felt> {
        match &self.storage {
            TraceStorage::RowMajor(matrix) => matrix.clone(),
            TraceStorage::Transposed { matrix, .. } => matrix.transpose(),
            TraceStorage::Parts {
                core_rm,
                chiplets_rm,
                range_checker_cols,
                num_rows,
            } => {
                let h = *num_rows;
                let w = TRACE_WIDTH;
                let cw = CHIPLETS_WIDTH;
                let total = h * w;
                let mut data = Vec::with_capacity(total);
                // SAFETY: `total` cells were reserved above, and every cell is
                // written by `fill_rows` below (each chunk writes all `w` cells of
                // each of its rows, and the chunks cover the whole buffer) before
                // `data` is read by `RowMajorMatrix::new`.
                #[allow(clippy::uninit_vec)]
                unsafe {
                    data.set_len(total);
                }
                // Copies the rows `start_row..start_row + chunk.len()/w` into `chunk`,
                // stitching together the three storage pieces per row.
                let fill_rows = |chunk: &mut [Felt], start_row: usize| {
                    let chunk_rows = chunk.len() / w;
                    for i in 0..chunk_rows {
                        let row = start_row + i;
                        let dst = &mut chunk[i * w..(i + 1) * w];
                        dst[..CORE_WIDTH]
                            .copy_from_slice(&core_rm[row * CORE_WIDTH..(row + 1) * CORE_WIDTH]);
                        dst[CORE_WIDTH] = range_checker_cols[0][row];
                        dst[CORE_WIDTH + 1] = range_checker_cols[1][row];
                        dst[CORE_WIDTH + 2..CORE_WIDTH + 2 + cw]
                            .copy_from_slice(&chiplets_rm[row * cw..(row + 1) * cw]);
                    }
                };
                #[cfg(not(feature = "concurrent"))]
                fill_rows(&mut data, 0);
                #[cfg(feature = "concurrent")]
                {
                    use miden_crypto::parallel::*;
                    // Chunk boundaries are multiples of `w`, so each chunk holds
                    // whole rows; the final chunk may be shorter but still a whole
                    // number of rows.
                    let rows_per_chunk = ROW_MAJOR_CHUNK_SIZE;
                    data.par_chunks_mut(rows_per_chunk * w).enumerate().for_each(
                        |(chunk_idx, chunk)| {
                            fill_rows(chunk, chunk_idx * rows_per_chunk);
                        },
                    );
                }
                RowMajorMatrix::new(data, w)
            },
        }
    }

    /// Returns the number of rows in the trace.
    pub fn num_rows(&self) -> usize {
        match &self.storage {
            TraceStorage::Parts { num_rows, .. } => *num_rows,
            TraceStorage::RowMajor(matrix) => matrix.height(),
            TraceStorage::Transposed { num_rows, .. } => *num_rows,
        }
    }

    /// Returns the index of the last program row.
    pub fn last_program_row(&self) -> RowIndex {
        self.last_program_row
    }

    /// Copies row `row_idx` into `row[..width()]`.
    ///
    /// # Panics
    /// Panics if `row` is shorter than the trace width, or (for the matrix-backed
    /// layouts) if `row_idx` is out of bounds.
    pub fn read_row_into(&self, row_idx: usize, row: &mut [Felt]) {
        let w = self.width();
        assert!(row.len() >= w, "row buffer too small for main trace");
        match &self.storage {
            TraceStorage::RowMajor(matrix) => {
                let slice = matrix.row_slice(row_idx).expect("row index in bounds");
                row[..w].copy_from_slice(&slice);
            },
            TraceStorage::Parts {
                core_rm, chiplets_rm, range_checker_cols, ..
            } => {
                row[..CORE_WIDTH]
                    .copy_from_slice(&core_rm[row_idx * CORE_WIDTH..(row_idx + 1) * CORE_WIDTH]);
                row[CORE_WIDTH] = range_checker_cols[0][row_idx];
                row[CORE_WIDTH + 1] = range_checker_cols[1][row_idx];
                row[CORE_WIDTH + 2..CORE_WIDTH + 2 + CHIPLETS_WIDTH].copy_from_slice(
                    &chiplets_rm[row_idx * CHIPLETS_WIDTH..(row_idx + 1) * CHIPLETS_WIDTH],
                );
            },
            TraceStorage::Transposed { matrix, num_cols, .. } => {
                // NOTE(review): this branch does not bounds-check `row_idx` before the
                // unchecked access below — presumably all callers pass a valid row;
                // verify against call sites.
                for (col_idx, cell) in row[..*num_cols].iter_mut().enumerate() {
                    // SAFETY: `col_idx < num_cols == matrix.height()`; `row_idx` must be
                    // `< num_rows == matrix.width()`, which is the caller's obligation.
                    *cell = unsafe { matrix.get_unchecked(col_idx, row_idx) };
                }
            },
        }
    }

    /// Returns a full copy of column `col_idx` as a `Vec`.
    pub fn get_column(&self, col_idx: usize) -> Vec<Felt> {
        let h = self.num_rows();
        match &self.storage {
            TraceStorage::Parts {
                core_rm, chiplets_rm, range_checker_cols, ..
            } => {
                assert!(col_idx < TRACE_WIDTH, "main trace column index in bounds");
                if col_idx < CORE_WIDTH {
                    (0..h).map(|r| core_rm[r * CORE_WIDTH + col_idx]).collect()
                } else {
                    let nc = col_idx - CORE_WIDTH;
                    if nc < RANGE_CHECK_TRACE_WIDTH {
                        // Range-checker columns are already stored column-wise.
                        range_checker_cols[nc].clone()
                    } else {
                        let cc = nc - RANGE_CHECK_TRACE_WIDTH;
                        (0..h).map(|r| chiplets_rm[r * CHIPLETS_WIDTH + cc]).collect()
                    }
                }
            },
            TraceStorage::RowMajor(_) => {
                // Strided gather, one `get` per row.
                (0..h).map(|r| self.get(RowIndex::from(r), col_idx)).collect()
            },
            TraceStorage::Transposed { matrix, .. } => {
                // Transposed layout stores the column contiguously as a matrix row.
                let row_slice = matrix.row_slice(col_idx).expect("column index in bounds");
                row_slice[..h].to_vec()
            },
        }
    }

    /// Iterates over all columns of the trace, yielding each as an owned `Vec`.
    pub fn columns(&self) -> impl Iterator<Item = Vec<Felt>> + '_ {
        (0..self.width()).map(|c| self.get_column(c))
    }

    /// Returns copies of the columns in `range` (testing helper).
    #[cfg(any(test, feature = "testing"))]
    pub fn get_column_range(&self, range: Range<usize>) -> Vec<Vec<Felt>> {
        range.fold(vec![], |mut acc, col_idx| {
            acc.push(self.get_column(col_idx));
            acc
        })
    }

    // SYSTEM COLUMNS
    // --------------------------------------------------------------------------------------------

    /// Returns the clock-cycle value at row `i`.
    pub fn clk(&self, i: RowIndex) -> Felt {
        self.get(i, CLK_COL_IDX)
    }

    /// Returns the execution-context value at row `i`.
    pub fn ctx(&self, i: RowIndex) -> Felt {
        self.get(i, CTX_COL_IDX)
    }

    // DECODER COLUMNS
    // --------------------------------------------------------------------------------------------

    /// Returns the value of the first decoder column (block address) at row `i`.
    pub fn addr(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET)
    }

    /// Returns `true` if the address column changes between rows `i` and `i + 1`.
    pub fn is_addr_change(&self, i: RowIndex) -> bool {
        self.addr(i) != self.addr(i + 1)
    }

    /// Returns the value of user-op helper register `i` at row `row`.
    pub fn helper_register(&self, i: usize, row: RowIndex) -> Felt {
        self.get(row, DECODER_TRACE_OFFSET + USER_OP_HELPERS_OFFSET + i)
    }

    /// Returns the full decoder hasher state at row `i`.
    pub fn decoder_hasher_state(&self, i: RowIndex) -> [Felt; NUM_HASHER_COLUMNS] {
        let mut state = [ZERO; NUM_HASHER_COLUMNS];
        for (idx, col_idx) in DECODER_HASHER_RANGE.enumerate() {
            state[idx] = self.get(i, col_idx);
        }
        state
    }

    /// Returns the first word (first `DIGEST_LEN` elements) of the decoder hasher state.
    pub fn decoder_hasher_state_first_half(&self, i: RowIndex) -> Word {
        let mut state = [ZERO; DIGEST_LEN];
        for (col, s) in state.iter_mut().enumerate() {
            *s = self.get(i, DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + col);
        }
        state.into()
    }

    /// Returns the second word of the decoder hasher state (elements 4..8).
    pub fn decoder_hasher_state_second_half(&self, i: RowIndex) -> Word {
        const SECOND_WORD_OFFSET: usize = 4;
        let mut state = [ZERO; DIGEST_LEN];
        for (col, s) in state.iter_mut().enumerate() {
            *s = self.get(i, DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + SECOND_WORD_OFFSET + col);
        }
        state.into()
    }

    /// Returns a single element of the decoder hasher state at row `i`.
    pub fn decoder_hasher_state_element(&self, element: usize, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + element)
    }

    /// Returns the hash of the currently-executing function at row `i`.
    pub fn fn_hash(&self, i: RowIndex) -> [Felt; DIGEST_LEN] {
        let mut state = [ZERO; DIGEST_LEN];
        for (col, s) in state.iter_mut().enumerate() {
            *s = self.get(i, FN_HASH_OFFSET + col);
        }
        state
    }

    /// Returns the "is loop body" decoder flag at row `i`.
    pub fn is_loop_body_flag(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + IS_LOOP_BODY_FLAG_COL_IDX)
    }

    /// Returns the "is loop" decoder flag at row `i`.
    pub fn is_loop_flag(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + IS_LOOP_FLAG_COL_IDX)
    }

    /// Returns the "is call" decoder flag at row `i`.
    pub fn is_call_flag(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + IS_CALL_FLAG_COL_IDX)
    }

    /// Returns the "is syscall" decoder flag at row `i`.
    pub fn is_syscall_flag(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + IS_SYSCALL_FLAG_COL_IDX)
    }

    /// Returns the three op-batch flag columns at row `i`.
    pub fn op_batch_flag(&self, i: RowIndex) -> [Felt; NUM_OP_BATCH_FLAGS] {
        [
            self.get(i, DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET),
            self.get(i, DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + 1),
            self.get(i, DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + 2),
        ]
    }

    /// Returns the operation group count at row `i`.
    pub fn group_count(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + GROUP_COUNT_COL_IDX)
    }

    /// Returns the decrease in group count between rows `i` and `i + 1`.
    pub fn delta_group_count(&self, i: RowIndex) -> Felt {
        self.group_count(i) - self.group_count(i + 1)
    }

    /// Returns the "in span" decoder column at row `i`.
    pub fn is_in_span(&self, i: RowIndex) -> Felt {
        self.get(i, DECODER_TRACE_OFFSET + IN_SPAN_COL_IDX)
    }

    /// Reconstructs the 7-bit opcode from the decoder op-bit columns at row `i`
    /// (bit 0 is least significant; weights 1, 2, 4, ..., 64).
    pub fn get_op_code(&self, i: RowIndex) -> Felt {
        let b0 = self.get(i, DECODER_TRACE_OFFSET + 1);
        let b1 = self.get(i, DECODER_TRACE_OFFSET + 2);
        let b2 = self.get(i, DECODER_TRACE_OFFSET + 3);
        let b3 = self.get(i, DECODER_TRACE_OFFSET + 4);
        let b4 = self.get(i, DECODER_TRACE_OFFSET + 5);
        let b5 = self.get(i, DECODER_TRACE_OFFSET + 6);
        let b6 = self.get(i, DECODER_TRACE_OFFSET + 7);
        b0 + b1 * Felt::from_u64(2)
            + b2 * Felt::from_u64(4)
            + b3 * Felt::from_u64(8)
            + b4 * Felt::from_u64(16)
            + b5 * Felt::from_u64(32)
            + b6 * Felt::from_u64(64)
    }

    /// Iterates over all row indices of the trace.
    pub fn row_iter(&self) -> impl Iterator<Item = RowIndex> {
        (0..self.num_rows()).map(RowIndex::from)
    }

    /// Returns `true` if the opcode-bit pattern at row `i` matches one of the
    /// left-shift operation encodings.
    ///
    /// NOTE(review): the specific bit patterns encode which opcodes shift the stack
    /// left — presumably mirroring the decoder's opcode table; verify against the
    /// opcode definitions.
    pub fn is_left_shift(&self, i: RowIndex) -> bool {
        let b0 = self.get(i, DECODER_TRACE_OFFSET + 1);
        let b1 = self.get(i, DECODER_TRACE_OFFSET + 2);
        let b2 = self.get(i, DECODER_TRACE_OFFSET + 3);
        let b3 = self.get(i, DECODER_TRACE_OFFSET + 4);
        let b4 = self.get(i, DECODER_TRACE_OFFSET + 5);
        let b5 = self.get(i, DECODER_TRACE_OFFSET + 6);
        let b6 = self.get(i, DECODER_TRACE_OFFSET + 7);
        let e0 = self.get(i, DECODER_TRACE_OFFSET + OP_BITS_EXTRA_COLS_OFFSET);
        let h5 = self.get(i, DECODER_TRACE_OFFSET + IS_LOOP_FLAG_COL_IDX);
        ([b6, b5, b4] == [ZERO, ONE, ZERO])||
        ([b6, b5, b4, b3, b2] == [ONE, ZERO, ZERO, ONE, ONE]) ||
        ([e0, b3, b2, b1] == [ONE, ZERO, ONE, ZERO]) ||
        ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ONE, ONE, ZERO, ONE, ZERO, ZERO]) ||
        ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ZERO, ONE, ONE, ZERO, ZERO, ZERO]) ||
        ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ONE, ONE, ZERO, ZERO, ZERO, ZERO] && h5 == ONE)
    }

    /// Returns `true` if the opcode-bit pattern at row `i` matches one of the
    /// right-shift operation encodings (see the note on [`Self::is_left_shift`]
    /// regarding the bit patterns).
    pub fn is_right_shift(&self, i: RowIndex) -> bool {
        let b0 = self.get(i, DECODER_TRACE_OFFSET + 1);
        let b1 = self.get(i, DECODER_TRACE_OFFSET + 2);
        let b2 = self.get(i, DECODER_TRACE_OFFSET + 3);
        let b3 = self.get(i, DECODER_TRACE_OFFSET + 4);
        let b4 = self.get(i, DECODER_TRACE_OFFSET + 5);
        let b5 = self.get(i, DECODER_TRACE_OFFSET + 6);
        let b6 = self.get(i, DECODER_TRACE_OFFSET + 7);
        [b6, b5, b4] == [ZERO, ONE, ONE]||
        ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ZERO, ZERO, ONE, ZERO, ZERO, ZERO]) ||
        ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ZERO, ONE, ONE, ZERO, ONE, ONE])
    }

    // STACK COLUMNS
    // --------------------------------------------------------------------------------------------

    /// Returns the stack depth (bookkeeping column `b0`) at row `i`.
    pub fn stack_depth(&self, i: RowIndex) -> Felt {
        self.get(i, STACK_TRACE_OFFSET + B0_COL_IDX)
    }

    /// Returns the stack element in position `column` at row `i`.
    pub fn stack_element(&self, column: usize, i: RowIndex) -> Felt {
        self.get(i, STACK_TRACE_OFFSET + column)
    }

    /// Returns the word formed by stack elements `start..start + 4` at row `i`.
    pub fn stack_word(&self, start: usize, i: RowIndex) -> Word {
        Word::from([
            self.stack_element(start, i),
            self.stack_element(start + 1, i),
            self.stack_element(start + 2, i),
            self.stack_element(start + 3, i),
        ])
    }

    /// Returns the overflow-table parent address (bookkeeping column `b1`) at row `i`.
    pub fn parent_overflow_address(&self, i: RowIndex) -> Felt {
        self.get(i, STACK_TRACE_OFFSET + B1_COL_IDX)
    }

    /// Returns `true` when `(b0 - 16) * h0 == 1`, i.e. when the stack depth exceeds 16
    /// (presumably `h0` holds the inverse of `depth - 16` — verify against the stack
    /// trace builder).
    pub fn is_non_empty_overflow(&self, i: RowIndex) -> bool {
        let b0 = self.get(i, STACK_TRACE_OFFSET + B0_COL_IDX);
        let h0 = self.get(i, STACK_TRACE_OFFSET + H0_COL_IDX);
        (b0 - Felt::from_u64(16)) * h0 == ONE
    }

    // CHIPLETS COLUMNS
    // --------------------------------------------------------------------------------------------

    /// Returns chiplet selector column 0 at row `i`.
    pub fn chiplet_selector_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET)
    }

    /// Returns chiplet selector column 1 at row `i`.
    pub fn chiplet_selector_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 1)
    }

    /// Returns chiplet selector column 2 at row `i`.
    pub fn chiplet_selector_2(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 2)
    }

    /// Returns chiplet selector column 3 at row `i`.
    pub fn chiplet_selector_3(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 3)
    }

    /// Returns chiplet selector column 4 at row `i`.
    pub fn chiplet_selector_4(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 4)
    }

    /// Returns chiplet selector column 5 at row `i`.
    pub fn chiplet_selector_5(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 5)
    }

    /// Returns `true` when row `i` belongs to the hasher chiplet
    /// (selector 0 is ONE, or the hasher's permutation selector is ONE).
    pub fn is_hash_row(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ONE || self.chiplet_s_perm(i) == ONE
    }

    /// Returns the full hasher chiplet state at row `i`.
    pub fn chiplet_hasher_state(&self, i: RowIndex) -> [Felt; STATE_WIDTH] {
        let mut state = [ZERO; STATE_WIDTH];
        for (idx, col_idx) in HASHER_STATE_COL_RANGE.enumerate() {
            state[idx] = self.get(i, col_idx);
        }
        state
    }

    /// Returns the hasher node-index column at row `i`.
    pub fn chiplet_node_index(&self, i: RowIndex) -> Felt {
        self.get(i, HASHER_NODE_INDEX_COL_IDX)
    }

    /// Returns the hasher Merkle-root-update id column at row `i`.
    pub fn chiplet_mrupdate_id(&self, i: RowIndex) -> Felt {
        self.get(i, HASHER_MRUPDATE_ID_COL_IDX)
    }

    /// Returns the hasher boundary-flag column at row `i`.
    pub fn chiplet_is_boundary(&self, i: RowIndex) -> Felt {
        self.get(i, HASHER_IS_BOUNDARY_COL_IDX)
    }

    /// Returns the hasher direction-bit column at row `i`.
    pub fn chiplet_direction_bit(&self, i: RowIndex) -> Felt {
        self.get(i, HASHER_DIRECTION_BIT_COL_IDX)
    }

    /// Returns the hasher permutation selector column at row `i`.
    pub fn chiplet_s_perm(&self, i: RowIndex) -> Felt {
        self.get(i, HASHER_S_PERM_COL_IDX)
    }

    /// Returns the low half of the memory word address at row `i`.
    pub fn chiplet_memory_word_addr_lo(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_WORD_ADDR_LO_COL_IDX)
    }

    /// Returns the high half of the memory word address at row `i`.
    pub fn chiplet_memory_word_addr_hi(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_WORD_ADDR_HI_COL_IDX)
    }

    /// Returns `true` when row `i` belongs to the bitwise chiplet
    /// (selectors (s0, s_perm, s1) = (0, 0, 0)).
    pub fn is_bitwise_row(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ZERO
            && self.chiplet_s_perm(i) == ZERO
            && self.chiplet_selector_1(i) == ZERO
    }

    /// Returns the bitwise chiplet operand `a` at row `i`.
    pub fn chiplet_bitwise_a(&self, i: RowIndex) -> Felt {
        self.get(i, BITWISE_A_COL_IDX)
    }

    /// Returns the bitwise chiplet operand `b` at row `i`.
    pub fn chiplet_bitwise_b(&self, i: RowIndex) -> Felt {
        self.get(i, BITWISE_B_COL_IDX)
    }

    /// Returns the bitwise chiplet output at row `i`.
    pub fn chiplet_bitwise_z(&self, i: RowIndex) -> Felt {
        self.get(i, BITWISE_OUTPUT_COL_IDX)
    }

    /// Returns `true` when row `i` belongs to the memory chiplet
    /// (selectors (s0, s_perm, s1, s2) = (0, 0, 1, 0)).
    pub fn is_memory_row(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ZERO
            && self.chiplet_s_perm(i) == ZERO
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ZERO
    }

    /// Returns the memory chiplet context column at row `i`.
    pub fn chiplet_memory_ctx(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_CTX_COL_IDX)
    }

    /// Returns the memory chiplet word column at row `i`.
    pub fn chiplet_memory_word(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_WORD_COL_IDX)
    }

    /// Returns the memory chiplet index-bit 0 column at row `i`.
    pub fn chiplet_memory_idx0(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_IDX0_COL_IDX)
    }

    /// Returns the memory chiplet index-bit 1 column at row `i`.
    pub fn chiplet_memory_idx1(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_IDX1_COL_IDX)
    }

    /// Returns the memory chiplet clock column at row `i`.
    pub fn chiplet_memory_clk(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_CLK_COL_IDX)
    }

    /// Returns element 0 of the memory chiplet value word at row `i`.
    pub fn chiplet_memory_value_0(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_V_COL_RANGE.start)
    }

    /// Returns element 1 of the memory chiplet value word at row `i`.
    pub fn chiplet_memory_value_1(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_V_COL_RANGE.start + 1)
    }

    /// Returns element 2 of the memory chiplet value word at row `i`.
    pub fn chiplet_memory_value_2(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_V_COL_RANGE.start + 2)
    }

    /// Returns element 3 of the memory chiplet value word at row `i`.
    pub fn chiplet_memory_value_3(&self, i: RowIndex) -> Felt {
        self.get(i, MEMORY_V_COL_RANGE.start + 3)
    }

    /// Returns `true` when row `i` belongs to the ACE chiplet
    /// (selectors (s0, s_perm, s1, s2, s3) = (0, 0, 1, 1, 0)).
    pub fn is_ace_row(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ZERO
            && self.chiplet_s_perm(i) == ZERO
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ONE
            && self.chiplet_selector_3(i) == ZERO
    }

    /// Returns the ACE chiplet start selector at row `i`.
    pub fn chiplet_ace_start_selector(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + SELECTOR_START_IDX)
    }

    /// Returns the ACE chiplet block selector at row `i`.
    pub fn chiplet_ace_block_selector(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + SELECTOR_BLOCK_IDX)
    }

    /// Returns the ACE chiplet context column at row `i`.
    pub fn chiplet_ace_ctx(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + CTX_IDX)
    }

    /// Returns the ACE chiplet pointer column at row `i`.
    pub fn chiplet_ace_ptr(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + PTR_IDX)
    }

    /// Returns the ACE chiplet clock column at row `i`.
    pub fn chiplet_ace_clk(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + CLK_IDX)
    }

    /// Returns the ACE chiplet eval-op column at row `i`.
    pub fn chiplet_ace_eval_op(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + EVAL_OP_IDX)
    }

    /// Returns the ACE chiplet number-of-eval-rows column at row `i`.
    pub fn chiplet_ace_num_eval_rows(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + READ_NUM_EVAL_IDX)
    }

    /// Returns the id of ACE wire 0 at row `i`.
    pub fn chiplet_ace_id_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + ID_0_IDX)
    }

    /// Returns the first value component of ACE wire 0 at row `i`.
    pub fn chiplet_ace_v_0_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_0_0_IDX)
    }

    /// Returns the second value component of ACE wire 0 at row `i`.
    pub fn chiplet_ace_v_0_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_0_1_IDX)
    }

    /// Returns ACE wire 0 as `[id, v0, v1]` at row `i`.
    pub fn chiplet_ace_wire_0(&self, i: RowIndex) -> [Felt; 3] {
        let id_0 = self.chiplet_ace_id_0(i);
        let v_0_0 = self.chiplet_ace_v_0_0(i);
        let v_0_1 = self.chiplet_ace_v_0_1(i);
        [id_0, v_0_0, v_0_1]
    }

    /// Returns the id of ACE wire 1 at row `i`.
    pub fn chiplet_ace_id_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + ID_1_IDX)
    }

    /// Returns the first value component of ACE wire 1 at row `i`.
    pub fn chiplet_ace_v_1_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_1_0_IDX)
    }

    /// Returns the second value component of ACE wire 1 at row `i`.
    pub fn chiplet_ace_v_1_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_1_1_IDX)
    }

    /// Returns ACE wire 1 as `[id, v0, v1]` at row `i`.
    pub fn chiplet_ace_wire_1(&self, i: RowIndex) -> [Felt; 3] {
        let id_1 = self.chiplet_ace_id_1(i);
        let v_1_0 = self.chiplet_ace_v_1_0(i);
        let v_1_1 = self.chiplet_ace_v_1_1(i);
        [id_1, v_1_0, v_1_1]
    }

    /// Returns the id of ACE wire 2 at row `i`.
    pub fn chiplet_ace_id_2(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + ID_2_IDX)
    }

    /// Returns the first value component of ACE wire 2 at row `i`.
    pub fn chiplet_ace_v_2_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_2_0_IDX)
    }

    /// Returns the second value component of ACE wire 2 at row `i`.
    pub fn chiplet_ace_v_2_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + V_2_1_IDX)
    }

    /// Returns ACE wire 2 as `[id, v0, v1]` at row `i`.
    pub fn chiplet_ace_wire_2(&self, i: RowIndex) -> [Felt; 3] {
        let id_2 = self.chiplet_ace_id_2(i);
        let v_2_0 = self.chiplet_ace_v_2_0(i);
        let v_2_1 = self.chiplet_ace_v_2_1(i);
        [id_2, v_2_0, v_2_1]
    }

    /// Returns the ACE multiplicity column `m_1` at row `i`.
    pub fn chiplet_ace_m_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + M_1_IDX)
    }

    /// Returns the ACE multiplicity column `m_0` at row `i`.
    pub fn chiplet_ace_m_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + NUM_ACE_SELECTORS + M_0_IDX)
    }

    /// Returns `true` when row `i` is an ACE row in the read block (block selector ZERO).
    pub fn chiplet_ace_is_read_row(&self, i: RowIndex) -> bool {
        self.is_ace_row(i) && self.chiplet_ace_block_selector(i) == ZERO
    }

    /// Returns `true` when row `i` is an ACE row in the eval block (block selector ONE).
    pub fn chiplet_ace_is_eval_row(&self, i: RowIndex) -> bool {
        self.is_ace_row(i) && self.chiplet_ace_block_selector(i) == ONE
    }

    /// Returns `true` when row `i` belongs to the kernel ROM chiplet
    /// (selectors (s0, s_perm, s1, s2, s3, s4) = (0, 0, 1, 1, 1, 0)).
    pub fn is_kernel_row(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ZERO
            && self.chiplet_s_perm(i) == ZERO
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ONE
            && self.chiplet_selector_3(i) == ONE
            && self.chiplet_selector_4(i) == ZERO
    }

    /// Returns `true` when the kernel chiplet's first-hash-row flag (column at
    /// `CHIPLETS_OFFSET + 5`) is ONE at row `i`.
    pub fn chiplet_kernel_is_first_hash_row(&self, i: RowIndex) -> bool {
        self.get(i, CHIPLETS_OFFSET + 5) == ONE
    }

    /// Returns element 0 of the kernel procedure root at row `i`.
    pub fn chiplet_kernel_root_0(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 6)
    }

    /// Returns element 1 of the kernel procedure root at row `i`.
    pub fn chiplet_kernel_root_1(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 7)
    }

    /// Returns element 2 of the kernel procedure root at row `i`.
    pub fn chiplet_kernel_root_2(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 8)
    }

    /// Returns element 3 of the kernel procedure root at row `i`.
    pub fn chiplet_kernel_root_3(&self, i: RowIndex) -> Felt {
        self.get(i, CHIPLETS_OFFSET + 9)
    }

    /// Returns `true` when selectors (s0, s_perm, s1, s2, s3) = (1, 0, 1, 1, 0) at
    /// row `i` (the `f_mv` chiplet-bus flag).
    pub fn f_mv(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ONE
            && self.chiplet_s_perm(i) == ZERO
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ONE
            && self.chiplet_selector_3(i) == ZERO
    }

    /// Returns `true` when selectors (s0, s_perm, s1, s2, s3) = (1, 0, 1, 1, 1) at
    /// row `i` (the `f_mu` chiplet-bus flag).
    pub fn f_mu(&self, i: RowIndex) -> bool {
        self.chiplet_selector_0(i) == ONE
            && self.chiplet_s_perm(i) == ZERO
            && self.chiplet_selector_1(i) == ONE
            && self.chiplet_selector_2(i) == ONE
            && self.chiplet_selector_3(i) == ONE
    }
}