use celestia_types::{
consts::appconsts::{
CONTINUATION_SPARSE_SHARE_CONTENT_SIZE, FIRST_SPARSE_SHARE_CONTENT_SIZE, NAMESPACE_SIZE,
SEQUENCE_LEN_BYTES, SHARE_INFO_BYTES, SHARE_SIZE, SIGNER_SIZE,
},
ShareProof,
};
use serde::{Deserialize, Serialize};
use sha3::{Digest, Keccak256};
// Host-side-only error types, gated behind the `host` feature.
#[cfg(feature = "host")]
mod error;
#[cfg(feature = "host")]
pub use error::{ErrorLabels, InclusionServiceError};
// gRPC service definitions pulled in from build-generated code
// (presumably prost/tonic output — confirm against the build script).
#[cfg(feature = "grpc")]
pub mod eqs {
include!("generated/eqs.rs");
}
/// Input witness for the ZK-stack equivalence proof.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ZKStackEqProofInput {
/// Celestia share inclusion proof for the blob's shares.
pub share_proof: ShareProof,
/// `false` => share version 0; `true` => the version whose first share
/// carries a signer field (see `first_data_offset`).
pub share_version: bool,
/// Unused padding bytes at the end of the blob's final share.
pub tail_padding: usize,
/// 32-byte data availability root the proof commits to.
pub data_availability_root: [u8; 32],
/// Batch number bound into the public output.
pub batch_number: u32,
/// Chain id bound into the public output.
pub chain_id: u64,
}
/// Public output committed by the ZK equivalence proof.
///
/// Encoded form is a fixed 76-byte layout:
/// bytes `0..32` keccak hash, bytes `32..64` data availability root,
/// bytes `64..68` batch number (LE u32), bytes `68..76` chain id (LE u64).
pub struct ZKStackEqProofOutput {
    pub keccak_hash: [u8; 32],
    pub data_availability_root: [u8; 32],
    pub batch_number: u32,
    pub chain_id: u64,
}
impl ZKStackEqProofOutput {
    /// Total byte length of the encoded output (32 + 32 + 4 + 8).
    pub const ENCODED_LEN: usize = 76;

    /// Serialize into the fixed 76-byte layout described on the type.
    pub fn to_vec(&self) -> Vec<u8> {
        // Exact-size preallocation: the layout is fixed, so no reallocation.
        let mut encoded = Vec::with_capacity(Self::ENCODED_LEN);
        encoded.extend_from_slice(&self.keccak_hash);
        encoded.extend_from_slice(&self.data_availability_root);
        encoded.extend_from_slice(&self.batch_number.to_le_bytes());
        encoded.extend_from_slice(&self.chain_id.to_le_bytes());
        encoded
    }

    /// Deserialize from the fixed layout produced by [`Self::to_vec`].
    ///
    /// # Errors
    /// Returns `OutputDeserializationError` when `data` is not exactly
    /// [`Self::ENCODED_LEN`] bytes long.
    #[cfg(feature = "host")]
    pub fn from_bytes(data: &[u8]) -> Result<Self, InclusionServiceError> {
        if data.len() != Self::ENCODED_LEN {
            return Err(InclusionServiceError::OutputDeserializationError);
        }
        // The length check above guarantees every sub-slice below has the
        // right size; the `map_err`s are defensive.
        Ok(ZKStackEqProofOutput {
            keccak_hash: data[0..32]
                .try_into()
                .map_err(|_| InclusionServiceError::OutputDeserializationError)?,
            data_availability_root: data[32..64]
                .try_into()
                .map_err(|_| InclusionServiceError::OutputDeserializationError)?,
            batch_number: u32::from_le_bytes(
                data[64..68]
                    .try_into()
                    .map_err(|_| InclusionServiceError::OutputDeserializationError)?,
            ),
            chain_id: u64::from_le_bytes(
                data[68..76]
                    .try_into()
                    .map_err(|_| InclusionServiceError::OutputDeserializationError)?,
            ),
        })
    }
}
/// Keccak-256 over the raw payload bytes carried by a sequence of shares.
///
/// The first share's payload starts after the namespace, info byte,
/// sequence length, and — when `share_version` is set — the signer field;
/// each continuation share's payload starts after the namespace and info
/// byte. The final `tail_padding` bytes of the last share are excluded, so
/// the digest equals keccak over the original blob data.
pub fn compute_share_raw_data_keccak(
    raw_shares: &[[u8; SHARE_SIZE]],
    share_version: bool,
    tail_padding: usize,
) -> [u8; 32] {
    let share_count = raw_shares.len();
    let first_off = first_data_offset(share_version);
    let mut hasher = Keccak256::new();

    // Single-share blob: all payload lives in the first share, minus padding.
    if share_count == 1 {
        let take = FIRST_SPARSE_SHARE_CONTENT_SIZE - tail_padding;
        hasher.update(&raw_shares[0][first_off..first_off + take]);
        return hasher.finalize().into();
    }

    // Multi-share blob: the first share carries a full first-share payload.
    hasher.update(&raw_shares[0][first_off..first_off + FIRST_SPARSE_SHARE_CONTENT_SIZE]);

    let cont_off = NAMESPACE_SIZE + SHARE_INFO_BYTES;
    let last_is_full = tail_padding == 0;
    // Number of continuation shares completely filled with payload.
    let full_cont = if last_is_full {
        share_count - 1
    } else {
        share_count - 2
    };

    for share in raw_shares.iter().skip(1).take(full_cont) {
        hasher.update(&share[cont_off..cont_off + CONTINUATION_SPARSE_SHARE_CONTENT_SIZE]);
    }

    // Partially-filled final share: drop the trailing padding bytes.
    if !last_is_full {
        let take = CONTINUATION_SPARSE_SHARE_CONTENT_SIZE - tail_padding;
        hasher.update(&raw_shares[share_count - 1][cont_off..cont_off + take]);
    }

    hasher.finalize().into()
}
/// Byte offset of the payload within the first share of a sequence:
/// namespace + info byte + sequence length, plus the signer field when
/// the share version flag is set.
#[inline(always)]
fn first_data_offset(version: bool) -> usize {
    NAMESPACE_SIZE
        + SHARE_INFO_BYTES
        + SEQUENCE_LEN_BYTES
        + if version { SIGNER_SIZE } else { 0 }
}
/// Number of unused payload bytes at the end of the final share for a blob
/// of `total_len` data bytes.
#[inline(always)]
pub fn tail_padding_for_len(total_len: usize) -> usize {
    if total_len <= FIRST_SPARSE_SHARE_CONTENT_SIZE {
        // Everything fits in the first share; the remainder is padding.
        return FIRST_SPARSE_SHARE_CONTENT_SIZE - total_len;
    }
    let overflow = total_len - FIRST_SPARSE_SHARE_CONTENT_SIZE;
    let tail = overflow % CONTINUATION_SPARSE_SHARE_CONTENT_SIZE;
    // Round `overflow` up to a whole continuation share; an exact fit pads 0.
    (CONTINUATION_SPARSE_SHARE_CONTENT_SIZE - tail) % CONTINUATION_SPARSE_SHARE_CONTENT_SIZE
}
/// Strict `u8` → `bool` conversion.
///
/// # Panics
/// Panics when `val` is anything other than `0` or `1`.
#[cfg(feature = "host")]
#[inline(always)]
pub fn exact_u8_to_bool(val: u8) -> bool {
    assert!(val <= 1, "u8->bool not 0 or 1: {}", val);
    val == 1
}
#[cfg(all(test, feature = "host"))]
mod test {
    use super::*;
    use celestia_types::{nmt::Namespace, AppVersion, Blob};
    use rand::{rngs::StdRng, Rng, SeedableRng};
    use sha3::{Digest, Keccak256};

    /// The fixed-layout output encoding round-trips through `from_bytes`.
    #[test]
    fn test_serialization() {
        let output = ZKStackEqProofOutput {
            keccak_hash: [0; 32],
            data_availability_root: [0; 32],
            batch_number: 0u32,
            chain_id: 0u64,
        };
        let bytes = output.to_vec();
        let decoded = ZKStackEqProofOutput::from_bytes(&bytes).unwrap();
        assert_eq!(output.keccak_hash, decoded.keccak_hash);
        assert_eq!(output.data_availability_root, decoded.data_availability_root);
    }

    /// Hashing the payload extracted from a blob's shares must match
    /// hashing the original random data, across several blob sizes.
    #[test]
    fn keccak_of_data_matches_keccak_from_blob_shares_randomized() {
        let mut rng = StdRng::seed_from_u64(0xCE1E5);
        for _ in 0..5 {
            let len = rng.gen_range(100usize..=1_000_000usize);
            let mut data = vec![0u8; len];
            rng.fill(&mut data[..]);

            let ns = Namespace::new_v0(&[1, 2, 3, 4, 5]).expect("invalid namespace");
            let blob = Blob::new(ns, data.clone(), None, AppVersion::latest())
                .expect("blob construction failed");
            let share_version = exact_u8_to_bool(blob.share_version);

            let shares: Vec<[u8; SHARE_SIZE]> = blob
                .to_shares()
                .expect("invalid blob->shares")
                .iter()
                .map(|s| s.data().to_owned())
                .collect();

            // The info byte follows the namespace; its upper bits hold the
            // share version and the LSB flags sequence start.
            let first = shares.first().expect("no shares emitted");
            let info = first[NAMESPACE_SIZE];
            let derived_version_bool = exact_u8_to_bool(info >> 1);
            assert_eq!(derived_version_bool, share_version, "share version mismatch");

            let expected = {
                let mut h = Keccak256::new();
                h.update(&data);
                <[u8; 32]>::from(h.finalize())
            };
            let tail_padding = tail_padding_for_len(data.len());
            let got = compute_share_raw_data_keccak(&shares, derived_version_bool, tail_padding);
            assert_eq!(
                got, expected,
                "keccak(blob shares) != keccak(raw data) for len={len}, version={share_version}"
            );
        }
    }
}